Columns: source (string, 3 to 92 chars), original_c (string, 26 to 2.25M chars), no_omp_formatted (string, 0 to 2.25M chars), omp_formatted (string, 0 to 2.25M chars)
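Each row below pairs a source filename with three versions of the same C file: the original, a reformatted copy with the OpenMP pragmas stripped (no_omp_formatted), and a reformatted copy with the pragmas kept (omp_formatted). A minimal sketch of that row layout, with field names taken from the column list above; the struct itself is hypothetical:

/* Hypothetical record layout for one row of this dump. */
struct row {
    const char *source;            /* filename, e.g. "convolution_5x5.h" */
    const char *original_c;        /* original file contents */
    const char *no_omp_formatted;  /* reformatted, OpenMP pragmas removed */
    const char *omp_formatted;     /* reformatted, OpenMP pragmas kept */
};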
convolution_5x5.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q = 0; q < inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch * 25 + q * 25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* r5 = img0 + w * 5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * 
k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } } static void conv5x5s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int kernel_w = 5; int kernel_h = 5; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_sse(bottom_blob, top_blob, _kernel, _bias, kernel_w, kernel_h, stride_w, stride_h, opt); }
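conv5x5s1_sse above computes two output rows per outer iteration so the five kernel-row pointers k0..k4 are reused for both accumulators (sum and sum2), accumulating each input channel into an output pre-filled with the bias; conv5x5s2_sse instead delegates to the im2col-plus-SGEMM path. For reference, a minimal unoptimized 5x5 stride-1 convolution for one input/output channel pair, assuming the same valid layout (outw = w - 4, outh = h - 4); the function is an illustrative sketch, not ncnn's API:

/* Naive 5x5 stride-1 valid convolution for one channel pair.
 * img is w*h, kernel is 25 floats row-major, out is outw*outh.
 * out is accumulated into, matching how the SSE path adds onto a
 * bias-filled output across input channels. */
static void conv5x5s1_naive(const float *img, int w, int h,
                            const float *kernel, float *out)
{
    int outw = w - 4, outh = h - 4;
    for (int y = 0; y < outh; y++) {
        for (int x = 0; x < outw; x++) {
            float sum = 0.f;
            for (int ky = 0; ky < 5; ky++)
                for (int kx = 0; kx < 5; kx++)
                    sum += img[(y + ky) * w + (x + kx)] * kernel[ky * 5 + kx];
            out[y * outw + x] += sum;
        }
    }
}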
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q = 0; q < inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch * 25 + q * 25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* r5 = img0 + w * 5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum 
+= r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } } static void conv5x5s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int kernel_w = 5; int kernel_h = 5; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_sse(bottom_blob, top_blob, _kernel, _bias, kernel_w, kernel_h, stride_w, stride_h, opt); }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q = 0; q < inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p * inch * 25 + q * 25; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* r5 = img0 + w * 5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * 
k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr += sum; r0++; r1++; r2++; r3++; r4++; outptr++; } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } } static void conv5x5s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int kernel_w = 5; int kernel_h = 5; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_sse(bottom_blob, top_blob, _kernel, _bias, kernel_w, kernel_h, stride_w, stride_h, opt); }
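Comparing the three cells of this row: the only functional difference between omp_formatted and no_omp_formatted is the #pragma omp parallel for num_threads(opt.num_threads) directive before the output-channel loop. Each iteration writes only its own top_blob channel, so the loop carries no dependencies and parallelizes safely. A minimal self-contained sketch of that pattern, using plain arrays in place of ncnn's Mat (run_channels and chan_size are illustrative names):

#include <stddef.h>

/* Each output channel p owns a disjoint slice of out, so the channel
 * loop can be parallelized exactly as in the omp_formatted variant.
 * The bias fill stands in for the full per-channel convolution body. */
void run_channels(float *out, const float *bias, int outch, int chan_size,
                  int num_threads)
{
    #pragma omp parallel for num_threads(num_threads)
    for (int p = 0; p < outch; p++) {
        float *chan = out + (size_t)p * chan_size;
        for (int i = 0; i < chan_size; i++)
            chan[i] = bias ? bias[p] : 0.f;
    }
}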
zkbdf_eval.c
/* Name: zkbdf_eval.c Author: Tan Teik Guan Description: Eval function for VDF realization using ZKBoo. Modified from MPC_SHA256.c */ /* ============================================================================ Name : MPC_SHA256.c Author : Sobuno Version : 0.1 Description : MPC SHA256 for one block only ============================================================================ */ #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "shared.h" #include <math.h> #include "omp.h" #define CH(e,f,g) ((e & f) ^ ((~e) & g)) int totalRandom = 0; int totalSha = 0; int totalSS = 0; int totalHash = 0; int NUM_ROUNDS = 100; uint32_t rand32() { uint32_t x; x = rand() & 0xff; x |= (rand() & 0xff) << 8; x |= (rand() & 0xff) << 16; x |= (rand() & 0xff) << 24; return x; } void printbits(uint32_t n) { if (n) { printbits(n >> 1); printf("%d", n & 1); } } void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) { z[0] = x[0] ^ y[0]; z[1] = x[1] ^ y[1]; z[2] = x[2] ^ y[2]; } void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint32_t t[3] = { 0 }; t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1]; t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2]; t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0]; z[0] = t[0]; z[1] = t[1]; z[2] = t[2]; views[0].y[*countY] = z[0]; views[1].y[*countY] = z[1]; views[2].y[*countY] = z[2]; (*countY)++; } void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) { z[0] = ~x[0]; z[1] = ~x[1]; z[2] = ~x[2]; } void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t c[3] = { 0 }; uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for(int i=0;i<31;i++) { a[0]=GETBIT(x[0]^c[0],i); a[1]=GETBIT(x[1]^c[1],i); a[2]=GETBIT(x[2]^c[2],i); b[0]=GETBIT(y[0]^c[0],i); b[1]=GETBIT(y[1]^c[1],i); b[2]=GETBIT(y[2]^c[2],i); t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i); SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i)); t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i); SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ GETBIT(c[1],i) ^ GETBIT(r[1],i)); t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i); SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i)); } z[0]=x[0]^y[0]^c[0]; z[1]=x[1]^y[1]^c[1]; z[2]=x[2]^y[2]^c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t c[3] = { 0 }; uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for(int i=0;i<31;i++) { a[0]=GETBIT(x[0]^c[0],i); a[1]=GETBIT(x[1]^c[1],i); a[2]=GETBIT(x[2]^c[2],i); b[0]=GETBIT(y^c[0],i); b[1]=GETBIT(y^c[1],i); b[2]=GETBIT(y^c[2],i); t = (a[0]&b[1]) ^ (a[1]&b[0]) ^ GETBIT(r[1],i); SETBIT(c[0],i+1, t ^ (a[0]&b[0]) ^ GETBIT(c[0],i) ^ GETBIT(r[0],i)); t = (a[1]&b[2]) ^ (a[2]&b[1]) ^ GETBIT(r[2],i); SETBIT(c[1],i+1, t ^ (a[1]&b[1]) ^ 
GETBIT(c[1],i) ^ GETBIT(r[1],i)); t = (a[2]&b[0]) ^ (a[0]&b[2]) ^ GETBIT(r[0],i); SETBIT(c[2],i+1, t ^ (a[2]&b[2]) ^ GETBIT(c[2],i) ^ GETBIT(r[2],i)); } z[0]=x[0]^y^c[0]; z[1]=x[1]^y^c[1]; z[2]=x[2]^y^c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } int sha256(unsigned char* result, unsigned char* input, int numBits) { uint32_t hA[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; int remainingBits = numBits; int chars; int i; while (remainingBits >= 0) { if (remainingBits > 447) { chars = 64; remainingBits -= 512; } else { chars = remainingBits >> 3; remainingBits = -1; } unsigned char* chunk = calloc(64, 1); //512 bits memcpy(chunk, input, chars); input += chars; if (chars < 64) { chunk[chars] = 0x80; chunk[60] = numBits >> 24; chunk[61] = numBits >> 16; chunk[62] = numBits >> 8; chunk[63] = numBits; } uint32_t w[64]; for (i = 0; i < 16; i++) { w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16) | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3]; } uint32_t s0, s1; for (i = 16; i < 64; i++) { s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18) ^ (w[i - 15] >> 3); s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19) ^ (w[i - 2] >> 10); w[i] = w[i - 16] + s0 + w[i - 7] + s1; } uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj; a = hA[0]; b = hA[1]; c = hA[2]; d = hA[3]; e = hA[4]; f = hA[5]; g = hA[6]; h = hA[7]; for (i = 0; i < 64; i++) { s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); maj = (a & (b ^ c)) ^ (b & c); temp2 = s0 + maj; h = g; g = f; f = e; e = d + temp1; d = c; c = b; b = a; a = temp1 + temp2; } hA[0] += a; hA[1] += b; hA[2] += c; hA[3] += d; hA[4] += e; hA[5] += f; hA[6] += g; hA[7] += h; } for (i = 0; i < 8; i++) { result[i * 4] = (hA[i] >> 24); result[i * 4 + 1] = (hA[i] >> 16); result[i * 4 + 2] = (hA[i] >> 8); result[i * 4 + 3] = hA[i]; } return 0; } void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) { z[0] = RIGHTROTATE(x[0], i); z[1] = RIGHTROTATE(x[1], i); z[2] = RIGHTROTATE(x[2], i); } void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) { z[0] = x[0] >> i; z[1] = x[1] >> i; z[2] = x[2] >> i; } void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t t0[3]; uint32_t t1[3]; mpc_XOR(a, b, t0); mpc_XOR(a, c, t1); mpc_AND(t0, t1, z, randomness, randCount, views, countY); mpc_XOR(z, a, z); } void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { uint32_t t0[3]; //e & (f^g) ^ g mpc_XOR(f,g,t0); mpc_AND(e,t0,t0, randomness, randCount, views, countY); mpc_XOR(t0,g,z); } int mpc_sha256(unsigned char* results[3], unsigned char inputs[3][BLOCK_SIZE], int numBits, int addView, uint32_t hA[8][3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) { /* if (numBits > 447) { printf("Input too long, aborting!"); return -1; } */ int chars = numBits >> 3; unsigned char* chunks[3]; uint32_t w[64][3]; uint32_t msg[MSG_SIZE/4]; /* if (addMsg) { for (int j=0;j<(numBits/32);j++) { msg[j] = (addMsg[j*4]<<24) | (addMsg[j*4+1]<<16) | (addMsg[j*4+2] << 8) | (addMsg[j*4+3]); } } */ for (int i =0; i<64;i++) { w[i][0]=w[i][1]=w[i][2] = 0; } for (int i = 0; i < 3; i++) { chunks[i] = calloc(64, 1); //512 bits memcpy(chunks[i], 
inputs[i], BLOCK_SIZE /*chars*/); /* chunks[i][chars] = 0x80; //Last 8 chars used for storing length of input without padding, in big-endian. //Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest chunk[60] = numBits >> 24; chunk[61] = numBits >> 16; chunks[i][62] = numBits >> 8; chunks[i][63] = numBits; */ if (addView) memcpy(views[i].x, chunks[i], 64); for (int j = 0; j < 16; j++) { w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16) | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3]; } free(chunks[i]); } uint32_t s0[3], s1[3]; uint32_t t0[3], t1[3]; for (int j = 16; j < 64; j++) { //s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3); mpc_RIGHTROTATE(w[j-15], 7, t0); mpc_RIGHTROTATE(w[j-15], 18, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j-15], 3, t1); mpc_XOR(t0, t1, s0); //s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10); mpc_RIGHTROTATE(w[j-2], 17, t0); mpc_RIGHTROTATE(w[j-2], 19, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j-2], 10, t1); mpc_XOR(t0, t1, s1); //w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i]; mpc_ADD(w[j-16], s0, t1, randomness, randCount, views, countY); mpc_ADD(w[j-7], t1, t1, randomness, randCount, views, countY); mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY); } uint32_t a[3] = { hA[0][0],hA[0][1],hA[0][2] }; uint32_t b[3] = { hA[1][0],hA[1][1],hA[1][2] }; uint32_t c[3] = { hA[2][0],hA[2][1],hA[2][2] }; uint32_t d[3] = { hA[3][0],hA[3][1],hA[3][2] }; uint32_t e[3] = { hA[4][0],hA[4][1],hA[4][2] }; uint32_t f[3] = { hA[5][0],hA[5][1],hA[5][2] }; uint32_t g[3] = { hA[6][0],hA[6][1],hA[6][2] }; uint32_t h[3] = { hA[7][0],hA[7][1],hA[7][2] }; uint32_t temp1[3], temp2[3], maj[3]; for (int i = 0; i < 64; i++) { //s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25); mpc_RIGHTROTATE(e, 6, t0); mpc_RIGHTROTATE(e, 11, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(e, 25, t1); mpc_XOR(t0, t1, s1); //ch = (e & f) ^ ((~e) & g); //temp1 = h + s1 + CH(e,f,g) + k[i]+w[i]; //t0 = h + s1 mpc_ADD(h, s1, t0, randomness, randCount, views, countY); mpc_CH(e, f, g, t1, randomness, randCount, views, countY); //t1 = t0 + t1 (h+s1+ch) mpc_ADD(t0, t1, t1, randomness, randCount, views, countY); mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY); mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY); //s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22); mpc_RIGHTROTATE(a, 2, t0); mpc_RIGHTROTATE(a, 13, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(a, 22, t1); mpc_XOR(t0, t1, s0); mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY); //temp2 = s0+maj; mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY); memcpy(h, g, sizeof(uint32_t) * 3); memcpy(g, f, sizeof(uint32_t) * 3); memcpy(f, e, sizeof(uint32_t) * 3); //e = d+temp1; mpc_ADD(d, temp1, e, randomness, randCount, views, countY); memcpy(d, c, sizeof(uint32_t) * 3); memcpy(c, b, sizeof(uint32_t) * 3); memcpy(b, a, sizeof(uint32_t) * 3); //a = temp1+temp2; mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY); } /* uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] }, { hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } }; */ uint32_t hHa[8][3]; mpc_ADD(hA[0], a, hHa[0], randomness, randCount, views, countY); mpc_ADD(hA[1], b, hHa[1], randomness, randCount, views, countY); mpc_ADD(hA[2], c, hHa[2], randomness, randCount, views, countY); mpc_ADD(hA[3], d, hHa[3], 
randomness, randCount, views, countY); mpc_ADD(hA[4], e, hHa[4], randomness, randCount, views, countY); mpc_ADD(hA[5], f, hHa[5], randomness, randCount, views, countY); mpc_ADD(hA[6], g, hHa[6], randomness, randCount, views, countY); mpc_ADD(hA[7], h, hHa[7], randomness, randCount, views, countY); for (int i = 0; i < 8; i++) { hA[i][0] = hHa[i][0]; hA[i][1] = hHa[i][1]; hA[i][2] = hHa[i][2]; } for (int i = 0; i < 8; i++) { mpc_RIGHTSHIFT(hHa[i], 24, t0); results[0][i * 4] = t0[0]; results[1][i * 4] = t0[1]; results[2][i * 4] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 16, t0); results[0][i * 4 + 1] = t0[0]; results[1][i * 4 + 1] = t0[1]; results[2][i * 4 + 1] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 8, t0); results[0][i * 4 + 2] = t0[0]; results[1][i * 4 + 2] = t0[1]; results[2][i * 4 + 2] = t0[2]; results[0][i * 4 + 3] = hHa[i][0]; results[1][i * 4 + 3] = hHa[i][1]; results[2][i * 4 + 3] = hHa[i][2]; } return 0; } int writeToFile(char filename[], void* data, int size, int numItems) { FILE *file; file = fopen(filename, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(data, size, numItems, file); fclose(file); return 0; } int secretShare(unsigned char* input, int numBytes, unsigned char output[3][numBytes]) { if(RAND_bytes(output[0], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } if(RAND_bytes(output[1], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } for (int j = 0; j < numBytes; j++) { output[2][j] = input[j] ^ output[0][j] ^ output[1][j]; } return 0; } int mpc_hmac_sha256(unsigned char* results[3], unsigned char ek[3][BLOCK_SIZE], int numBytes, char * Cha, unsigned char *randomness[3], int* randCount, View views[3], int* countY) { unsigned char shares[3][BLOCK_SIZE]; uint32_t hA[8][3]; int i; unsigned char* innerhash[3],*outerhash[3]; innerhash[0] = malloc(32); innerhash[1] = malloc(32); innerhash[2] = malloc(32); outerhash[0] = malloc(32); outerhash[1] = malloc(32); outerhash[2] = malloc(32); if (strlen(Cha) > MSG_SIZE) { printf("Input too long, aborting!"); return -1; } for (i=0;i<8;i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; memset(shares[0],0,sizeof(shares[0])); memset(shares[1],0,sizeof(shares[1])); memset(shares[2],0,sizeof(shares[2])); for (i = 0; i < BLOCK_SIZE;i++) { shares[0][i] = ek[0][i] ^ 0x36; shares[1][i] = ek[1][i] ^ 0x36; shares[2][i] = ek[2][i] ^ 0x36; } mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY); memset(shares[0],0,sizeof(shares[0])); memset(shares[1],0,sizeof(shares[1])); memset(shares[2],0,sizeof(shares[2])); for (i = 0; i < strlen(Cha);i++) { shares[0][i] = Cha[i]; shares[1][i] = Cha[i]; shares[2][i] = Cha[i]; } shares[0][strlen(Cha)] = shares[1][strlen(Cha)] = shares[2][strlen(Cha)] = 0x80; shares[0][61] = shares[1][61] = shares[2][61] = (((strlen(Cha)* 8)+512) >> 16) & 0xFF; shares[0][62] = shares[1][62] = shares[2][62] = (((strlen(Cha)* 8)+512) >> 8) & 0xFF; shares[0][63] = shares[1][63] = shares[2][63] = ((strlen(Cha)* 8)+512) & 0xFF; mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY); for (i=0;i<8;i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; memset(shares,0,3*BLOCK_SIZE); for (i = 0; i < BLOCK_SIZE;i++) { shares[0][i] = ek[0][i] ^ 0x5c; shares[1][i] = ek[1][i] ^ 0x5c; shares[2][i] = ek[2][i] ^ 0x5c; } mpc_sha256(outerhash, shares, 512, 0, hA, randomness, randCount, views, countY); memset(shares,0,3*BLOCK_SIZE); for (i = 0; i < 32;i++) { shares[0][i] = innerhash[0][i]; shares[1][i] = innerhash[1][i]; shares[2][i] = innerhash[2][i]; } shares[0][32] = 
shares[1][32] = shares[2][32] = 0x80; shares[0][62] = shares[1][62] = shares[2][62] = 3; mpc_sha256(results, shares, 512, 0, hA, randomness, randCount, views, countY); free(innerhash[0]); free(innerhash[1]); free(innerhash[2]); free(outerhash[0]); free(outerhash[1]); free(outerhash[2]); return 0; } a commit(int numBytes, unsigned char shares[3][BLOCK_SIZE], char * Cha, unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) { unsigned char* hashes[3]; hashes[0] = malloc(32); hashes[1] = malloc(32); hashes[2] = malloc(32); int* randCount = calloc(1, sizeof(int)); int* countY = calloc(1, sizeof(int)); uint32_t hA[8][3]; int i; for (i=0;i<8;i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; *countY = 0; shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = 0x80; shares[0][62] = shares[1][62] = shares[2][62] = ((numBytes * 8) >> 8) & 0xFF; shares[0][63] = shares[1][63] = shares[2][63] = (numBytes * 8) & 0xFF; mpc_sha256(hashes, shares, numBytes * 8, 1, hA, randomness, randCount, views, countY); unsigned char * hmac[3]; hmac[0] = malloc(32); hmac[1] = malloc(32); hmac[2] = malloc(32); shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = shares[0][62] = shares[1][62] = shares[2][62] = shares[0][63] = shares[1][63] = shares[2][63] = 0; mpc_hmac_sha256(hmac, shares, numBytes, Cha, randomness, randCount, views, countY); //Explicitly add y to view free(randCount); for(int i = 0; i<8; i++) { views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16) | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3]; views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16) | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3]; views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16) | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3]; *countY += 1; } for(int i = 0; i<8; i++) { views[0].y[*countY] = (hmac[0][i * 4] << 24) | (hmac[0][i * 4 + 1] << 16) | (hmac[0][i * 4 + 2] << 8) | hmac[0][i * 4 + 3]; views[1].y[*countY] = (hmac[1][i * 4] << 24) | (hmac[1][i * 4 + 1] << 16) | (hmac[1][i * 4 + 2] << 8) | hmac[1][i * 4 + 3]; views[2].y[*countY] = (hmac[2][i * 4] << 24) | (hmac[2][i * 4 + 1] << 16) | (hmac[2][i * 4 + 2] << 8) | hmac[2][i * 4 + 3]; *countY += 1; } free(countY); free(hashes[0]); free(hashes[1]); free(hashes[2]); free(hmac[0]); free(hmac[1]); free(hmac[2]); uint32_t* result11 = malloc(32); uint32_t* result21 = malloc(32); output(views[0], result11,result21); uint32_t* result12 = malloc(32); uint32_t* result22 = malloc(32); output(views[1], result12, result22); uint32_t* result13 = malloc(32); uint32_t* result23 = malloc(32); output(views[2], result13,result23); a a; memcpy(a.yp1[0], result11, 32); memcpy(a.yp1[1], result12, 32); memcpy(a.yp1[2], result13, 32); memcpy(a.yp2[0], result21, 32); memcpy(a.yp2[1], result22, 32); memcpy(a.yp2[2], result23, 32); free(result11); free(result12); free(result13); free(result21); free(result22); free(result23); return a; } z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) { z z; memcpy(z.ke, keys[e], 16); memcpy(z.ke1, keys[(e + 1) % 3], 16); z.ve = views[e]; z.ve1 = views[(e + 1) % 3]; memcpy(z.re, rs[e],4); memcpy(z.re1, rs[(e + 1) % 3],4); return z; } int GetNextSelected(int size,unsigned char * data, int *dataPtr) { int value=0; int modulo = size; while (size > 0) { value <<=8; value += (int) data[*dataPtr]; size >>=8; (*dataPtr)++; } if (!(value & 0x01)) // will return odd number value++; return (int) value % modulo; } Merkle * 
BuildMerkleTree(int NumRounds,z * zs) { int i; Merkle * tempNode; Merkle * startNode = NULL; Merkle * childNode; Merkle * prevNode; int done = 0; int odd = 0; unsigned char datablock[64]; if ((!zs) || (NumRounds < 2)) return NULL; prevNode = NULL; for (i=0; i < NumRounds;i++) { tempNode = malloc(sizeof(Merkle)); if (i==0) startNode = tempNode; sha256(tempNode->data,(unsigned char *)&(zs[i]),sizeof(z) * 8); tempNode->parent = NULL; tempNode->type = 0; tempNode->next = NULL; tempNode->previous = prevNode; if (prevNode) prevNode->next = tempNode; if (!odd) { tempNode->sibling = NULL; odd = 1; } else { prevNode->sibling = tempNode; tempNode->sibling = prevNode; odd = 0; } prevNode = tempNode; } while (!done) { childNode = startNode; while (childNode->parent) childNode = childNode->parent; if (!childNode->sibling) { done = 1; continue; } odd = 0; prevNode = NULL; while (childNode != NULL) { tempNode = malloc(sizeof(Merkle)); tempNode->type = 1; childNode->parent = tempNode; tempNode->previous = prevNode; if (prevNode) prevNode->next = tempNode; tempNode->next = NULL; tempNode->parent = NULL; if (!odd) { tempNode->sibling = NULL; odd = 1; } else { prevNode->sibling = tempNode; tempNode->sibling = prevNode; odd = 0; } if (childNode->sibling) { childNode->sibling->parent = tempNode; memcpy(datablock,childNode->data,32); memcpy(&(datablock[32]),childNode->sibling->data,32); sha256(tempNode->data,datablock,64*8); childNode = childNode->sibling->next; } else { memset(datablock,0,sizeof(datablock)); memcpy(datablock,childNode->data,32); sha256(tempNode->data,datablock,64*8); childNode = childNode->sibling; } prevNode = tempNode; } } return startNode; } void DestroyMerkleTree(Merkle * startNode) { Merkle * tempNode; if (startNode->parent) DestroyMerkleTree(startNode->parent); startNode->parent = NULL; while (startNode) { tempNode = startNode->next; free(startNode); startNode = tempNode; } return; } #define NUM_LOOPS 1 int main(int argc, char * argv[]) { setbuf(stdout, NULL); srand((unsigned) time(NULL)); init_EVP(); openmp_thread_setup(); char CHALLENGE[BLOCK_SIZE]; char ek[BLOCK_SIZE]; //eval key is 447 bits // if (argc != 4) { printf("Usage: %s <number of rounds (e.g. 
20, 40, 60, 80, 100)> <challenge (Max %d char)> <eval key (Max %d char)>\n",argv[0],MSG_SIZE,MSG_SIZE); return -1; } NUM_ROUNDS = atoi(argv[1]); if ((NUM_ROUNDS & 0x01) || (NUM_ROUNDS < 4)) { printf("Number of rounds should be even and > 4\n"); return -1; } unsigned char garbage[4]; if(RAND_bytes(garbage, 4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } memset(CHALLENGE,0,sizeof(CHALLENGE)); strncpy(CHALLENGE,argv[2],MSG_SIZE); //55 is max length as we only support 447 bits = 55.875 bytes memset(ek,0,sizeof(ek)); strncpy(ek,argv[3],MSG_SIZE); int i = strlen(ek); printf("ek length: %d\n", i); printf("Iterations of ZKBdf: %d\n", NUM_ROUNDS); unsigned char input[BLOCK_SIZE]; // 512 bits memset(input,0,sizeof(input)); memcpy(input,ek,sizeof(input)); struct timeval begin, delta; gettimeofday(&begin,NULL); unsigned char rs[NUM_ROUNDS][3][4]; unsigned char keys[NUM_ROUNDS][3][16]; a as[NUM_ROUNDS]; View localViews[NUM_ROUNDS][3]; int totalCrypto = 0; z* zs; for(int loops=0;loops<NUM_LOOPS;loops++) { //Generating keys if(RAND_bytes((unsigned char *) keys, NUM_ROUNDS*3*16) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } if(RAND_bytes((unsigned char *)rs, NUM_ROUNDS*3*4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } //Sharing secrets unsigned char shares[NUM_ROUNDS][3][BLOCK_SIZE]; memset(shares,0,NUM_ROUNDS*3*BLOCK_SIZE); if(RAND_bytes((unsigned char *)shares, NUM_ROUNDS*3*BLOCK_SIZE) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } #pragma omp parallel for for(int k=0; k<NUM_ROUNDS; k++) { for (int j = 0; j < i; j++) { shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j]; } for (int j = i; j < BLOCK_SIZE; j++) { shares[k][2][j] = shares[k][0][j] = shares[k][1][j] = 0; } } unsigned char *randomness[NUM_ROUNDS][3]; int es[NUM_ROUNDS]; uint32_t finalHash1[8]; uint32_t finalHash2[8]; zs = malloc(sizeof(z)*NUM_ROUNDS); int r; for (r=0;r<NUM_ROUNDS;r++) { unsigned char plaintext[16]; memset(plaintext,0x30,sizeof(plaintext)); if (r!=0) { SHA256_CTX ctx; unsigned char prevroundhash[SHA256_DIGEST_LENGTH]; SHA256_Init(&ctx); SHA256_Update(&ctx, &(zs[r-1]), sizeof(z)); SHA256_Final(prevroundhash, &ctx); memcpy(plaintext,prevroundhash,sizeof(plaintext)); } //Generating randomness // #pragma omp parallel for // for(int k=0; k<(NUM_ROUNDS); k++) { for(int j = 0; j<3; j++) { randomness[r][j] = malloc((ySize*4)*sizeof(unsigned char)); getAllRandomness(keys[r][j], plaintext, randomness[r][j]); } // } //Running MPC-SHA2 // #pragma omp parallel for // for(int k=0; k<NUM_ROUNDS; k++) { as[r] = commit(i, shares[r], CHALLENGE, randomness[r], rs[r], localViews[r]); for(int j=0; j<3; j++) { free(randomness[r][j]); } // } //Committing // #pragma omp parallel for // for(int k=0; k<(NUM_ROUNDS); k++) { unsigned char hash1[SHA256_DIGEST_LENGTH]; memset(hash1,0,sizeof(hash1)); H(keys[r][0], localViews[r][0], rs[r][0], hash1); memcpy(as[r].h[0], &hash1, 32); H(keys[r][1], localViews[r][1], rs[r][1], hash1); memcpy(as[r].h[1], &hash1, 32); H(keys[r][2], localViews[r][2], rs[r][2], hash1); memcpy(as[r].h[2], &hash1, 32); // } //Generating E if (r==0) { for (int j = 0; j < 8; j++) { finalHash1[j] = as[0].yp1[0][j]^as[0].yp1[1][j]^as[0].yp1[2][j]; finalHash2[j] = as[0].yp2[0][j]^as[0].yp2[1][j]^as[0].yp2[2][j]; } printf("output H(ek) = "); for (int i = 0; i< 8;i++) { printf("%02X",finalHash1[i]); } printf("\n"); printf("output HMAC(ek,Challenge) = "); for (int i = 0; i< 8;i++) { printf("%02X",finalHash2[i]); } printf("\n"); } 
H3(finalHash1, finalHash2, &(as[r]), /*NUM_ROUNDS*/ 1, &(es[r])); //Packing Z // #pragma omp parallel for // for(int i = 0; i<(NUM_ROUNDS); i++) { zs[r] = prove(es[r],keys[r],rs[r], localViews[r]); // } } } // now to extract the PCP proofs int PCProunds = (int) ceil(log(NUM_ROUNDS)/log(2)); int Totalselected = 0; unsigned char PCPselected[NUM_ROUNDS]; Merkle * startNode = NULL; Merkle * currNode = NULL; Merkle * tempNode = NULL; Merkle * rootNode = NULL; unsigned char MerkleHash[64]; unsigned char MerkleBranch[(32*2*PCProunds)+32]; int MerkleHashPtr; int Nextselected; startNode = BuildMerkleTree(NUM_ROUNDS,zs); rootNode = startNode; while (rootNode->parent) rootNode = rootNode->parent; memset(MerkleHash,0,sizeof(MerkleHash)); memcpy(&(MerkleHash[32]),rootNode->data,32); sha256(MerkleHash,MerkleHash,64*8); MerkleHashPtr = 0; memset(PCPselected,0,sizeof(PCPselected)); while (Totalselected < PCProunds) { Nextselected = GetNextSelected(NUM_ROUNDS,MerkleHash,&MerkleHashPtr); if (!PCPselected[Nextselected]) { PCPselected[Nextselected] = 1; Totalselected++; } if (MerkleHashPtr >= 32) { sha256(MerkleHash,MerkleHash,64*8); MerkleHashPtr = 0; } } gettimeofday(&delta,NULL); unsigned long inMilli = (delta.tv_sec - begin.tv_sec)*1000000 + (delta.tv_usec - begin.tv_usec); inMilli /= 1000; //Writing ZKBoo proofs to file FILE *file; char outputFile[3*sizeof(int) + 8]; sprintf(outputFile, "out%i.bin", NUM_ROUNDS); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(as, sizeof(a), NUM_ROUNDS, file); fwrite(zs, sizeof(z), NUM_ROUNDS, file); fclose(file); // writing PCP proofs to file sprintf(outputFile, "pcp%i-%i.bin", NUM_ROUNDS,PCProunds); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } currNode = startNode; fwrite(rootNode->data,32,1,file); // write the root node first tempNode = startNode; for (int k =0;k<NUM_ROUNDS;k++) { fwrite(tempNode->data,32,1,file); tempNode = tempNode->next; } for (int j = 0; j < NUM_ROUNDS; j++) { if (PCPselected[j]) { // print current node tempNode = currNode; memset(MerkleBranch,0,sizeof(MerkleBranch)); MerkleHashPtr = 0; while(tempNode->parent != NULL) // write the current node { if (tempNode->sibling) { if (tempNode->sibling == tempNode->next) { memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->data,32); MerkleHashPtr += 32; memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->sibling->data,32); MerkleHashPtr += 32; } else { memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->sibling->data,32); MerkleHashPtr += 32; memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->data,32); MerkleHashPtr += 32; } } else { memcpy(&(MerkleBranch[MerkleHashPtr]),tempNode->data,32); MerkleHashPtr += 64; } tempNode = tempNode->parent; } fwrite(MerkleBranch,MerkleHashPtr,1,file); fwrite(&(as[j]), sizeof(a), 1, file); fwrite(&(zs[j]), sizeof(z), 1, file); fwrite(&(as[j-1]), sizeof(a), 1, file); fwrite(&(zs[j-1]), sizeof(z), 1, file); } currNode = currNode->next; } DestroyMerkleTree(startNode); fclose(file); free(zs); printf("Total time taken for %d loops: %d mili-seconds\n",NUM_LOOPS,inMilli); printf("Time per loop: %d mili-seconds\n",inMilli/NUM_LOOPS); printf("\n"); printf("zkboo Proof output to file %s", outputFile); openmp_thread_cleanup(); cleanup_EVP(); return EXIT_SUCCESS; }
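mpc_ADD in the file above realizes 32-bit addition over XOR-shared words with a ripple-carry loop: writing a_i = x_i ^ c_i and b_i = y_i ^ c_i, the next carry bit is (a_i & b_i) ^ c_i, and the AND is computed share-wise with the cross terms masked by the per-party randomness r and logged into the views; the loop only propagates carries into bits 1 through 31 because the carry out of bit 31 is discarded by 32-bit wraparound, and the result is z = x ^ y ^ c. A single-party sanity check of that recurrence, with local GETBIT/SETBIT macros mirroring the ones the file takes from shared.h:

#include <assert.h>
#include <stdint.h>

#define GETBIT(x, i)    (((x) >> (i)) & 0x01)
#define SETBIT(x, i, b) ((x) = (b) ? ((x) | (1u << (i))) : ((x) & ~(1u << (i))))

/* Ripple-carry addition expressed with XOR and AND, the same bit
 * recurrence mpc_ADD distributes across the three shares. */
static uint32_t add_via_carry(uint32_t x, uint32_t y)
{
    uint32_t c = 0;
    for (int i = 0; i < 31; i++) {
        uint32_t a = GETBIT(x ^ c, i);
        uint32_t b = GETBIT(y ^ c, i);
        SETBIT(c, i + 1, (a & b) ^ GETBIT(c, i)); /* majority(x_i, y_i, c_i) */
    }
    return x ^ y ^ c;
}

int main(void)
{
    assert(add_via_carry(0x89abcdefu, 0x12345678u)
           == 0x89abcdefu + 0x12345678u);
    return 0;
}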
/* * Name: zkbdf_eval.c Author: Tan Teik Guan Description: Eval function for VDF * realization using ZKBoo. Modified from MPC_SHA256.c */ /* * * ============================================================================ * Name : MPC_SHA256.c Author : Sobuno Version : 0.1 * Description : MPC SHA256 for one block only * =========================================================================== * = * */ #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "shared.h" #include <math.h> #include "omp.h" #define CH(e,f,g) ((e & f) ^ ((~e) & g)) int totalRandom = 0; int totalSha = 0; int totalSS = 0; int totalHash = 0; int NUM_ROUNDS = 100; uint32_t rand32() { uint32_t x; x = rand() & 0xff; x |= (rand() & 0xff) << 8; x |= (rand() & 0xff) << 16; x |= (rand() & 0xff) << 24; return x; } void printbits(uint32_t n) { if (n) { printbits(n >> 1); printf("%d", n & 1); } } void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) { z[0] = x[0] ^ y[0]; z[1] = x[1] ^ y[1]; z[2] = x[2] ^ y[2]; } void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint32_t t[3] = {0}; t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1]; t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2]; t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0]; z[0] = t[0]; z[1] = t[1]; z[2] = t[2]; views[0].y[*countY] = z[0]; views[1].y[*countY] = z[1]; views[2].y[*countY] = z[2]; (*countY)++; } void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) { z[0] = ~x[0]; z[1] = ~x[1]; z[2] = ~x[2]; } void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t c[3] = {0}; uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for (int i = 0; i < 31; i++) { a[0] = GETBIT(x[0] ^ c[0], i); a[1] = GETBIT(x[1] ^ c[1], i); a[2] = GETBIT(x[2] ^ c[2], i); b[0] = GETBIT(y[0] ^ c[0], i); b[1] = GETBIT(y[1] ^ c[1], i); b[2] = GETBIT(y[2] ^ c[2], i); t = (a[0] & b[1]) ^ (a[1] & b[0]) ^ GETBIT(r[1], i); SETBIT(c[0], i + 1, t ^ (a[0] & b[0]) ^ GETBIT(c[0], i) ^ GETBIT(r[0], i)); t = (a[1] & b[2]) ^ (a[2] & b[1]) ^ GETBIT(r[2], i); SETBIT(c[1], i + 1, t ^ (a[1] & b[1]) ^ GETBIT(c[1], i) ^ GETBIT(r[1], i)); t = (a[2] & b[0]) ^ (a[0] & b[2]) ^ GETBIT(r[0], i); SETBIT(c[2], i + 1, t ^ (a[2] & b[2]) ^ GETBIT(c[2], i) ^ GETBIT(r[2], i)); } z[0] = x[0] ^ y[0] ^ c[0]; z[1] = x[1] ^ y[1] ^ c[1]; z[2] = x[2] ^ y[2] ^ c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t c[3] = {0}; uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for (int i = 0; i < 31; i++) { a[0] = GETBIT(x[0] ^ c[0], i); a[1] = GETBIT(x[1] ^ c[1], i); a[2] = GETBIT(x[2] ^ c[2], i); b[0] = GETBIT(y ^ c[0], i); b[1] = GETBIT(y ^ c[1], i); b[2] = GETBIT(y ^ c[2], i); t = (a[0] & b[1]) ^ (a[1] & b[0]) ^ GETBIT(r[1], i); 
SETBIT(c[0], i + 1, t ^ (a[0] & b[0]) ^ GETBIT(c[0], i) ^ GETBIT(r[0], i)); t = (a[1] & b[2]) ^ (a[2] & b[1]) ^ GETBIT(r[2], i); SETBIT(c[1], i + 1, t ^ (a[1] & b[1]) ^ GETBIT(c[1], i) ^ GETBIT(r[1], i)); t = (a[2] & b[0]) ^ (a[0] & b[2]) ^ GETBIT(r[0], i); SETBIT(c[2], i + 1, t ^ (a[2] & b[2]) ^ GETBIT(c[2], i) ^ GETBIT(r[2], i)); } z[0] = x[0] ^ y ^ c[0]; z[1] = x[1] ^ y ^ c[1]; z[2] = x[2] ^ y ^ c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } int sha256(unsigned char *result, unsigned char *input, int numBits) { uint32_t hA[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; int remainingBits = numBits; int chars; int i; while (remainingBits >= 0) { if (remainingBits > 447) { chars = 64; remainingBits -= 512; } else { chars = remainingBits >> 3; remainingBits = -1; } unsigned char *chunk = calloc(64, 1); //512 bits memcpy(chunk, input, chars); input += chars; if (chars < 64) { chunk[chars] = 0x80; chunk[60] = numBits >> 24; chunk[61] = numBits >> 16; chunk[62] = numBits >> 8; chunk[63] = numBits; } uint32_t w[64]; for (i = 0; i < 16; i++) { w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16) | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3]; } uint32_t s0, s1; for (i = 16; i < 64; i++) { s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18) ^ (w[i - 15] >> 3); s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19) ^ (w[i - 2] >> 10); w[i] = w[i - 16] + s0 + w[i - 7] + s1; } uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj; a = hA[0]; b = hA[1]; c = hA[2]; d = hA[3]; e = hA[4]; f = hA[5]; g = hA[6]; h = hA[7]; for (i = 0; i < 64; i++) { s1 = RIGHTROTATE(e, 6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; s0 = RIGHTROTATE(a, 2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); maj = (a & (b ^ c)) ^ (b & c); temp2 = s0 + maj; h = g; g = f; f = e; e = d + temp1; d = c; c = b; b = a; a = temp1 + temp2; } hA[0] += a; hA[1] += b; hA[2] += c; hA[3] += d; hA[4] += e; hA[5] += f; hA[6] += g; hA[7] += h; } for (i = 0; i < 8; i++) { result[i * 4] = (hA[i] >> 24); result[i * 4 + 1] = (hA[i] >> 16); result[i * 4 + 2] = (hA[i] >> 8); result[i * 4 + 3] = hA[i]; } return 0; } void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) { z[0] = RIGHTROTATE(x[0], i); z[1] = RIGHTROTATE(x[1], i); z[2] = RIGHTROTATE(x[2], i); } void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) { z[0] = x[0] >> i; z[1] = x[1] >> i; z[2] = x[2] >> i; } void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t t0[3]; uint32_t t1[3]; mpc_XOR(a, b, t0); mpc_XOR(a, c, t1); mpc_AND(t0, t1, z, randomness, randCount, views, countY); mpc_XOR(z, a, z); } void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t t0[3]; //e & (f ^ g) ^ g mpc_XOR(f, g, t0); mpc_AND(e, t0, t0, randomness, randCount, views, countY); mpc_XOR(t0, g, z); } int mpc_sha256(unsigned char *results[3], unsigned char inputs[3][BLOCK_SIZE], int numBits, int addView, uint32_t hA[8][3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { /* * if (numBits > 447) { printf("Input too long, aborting!"); return -1; } */ int chars = numBits >> 3; unsigned char *chunks[3]; uint32_t w[64][3]; uint32_t msg[MSG_SIZE / 4]; /* * if (addMsg) { for (int j=0;j<(numBits/32);j++) { msg[j] = * (addMsg[j*4]<<24) | 
(addMsg[j*4+1]<<16) | (addMsg[j*4+2] << 8) | * (addMsg[j*4+3]); * * } * } */ for (int i = 0; i < 64; i++) { w[i][0] = w[i][1] = w[i][2] = 0; } for (int i = 0; i < 3; i++) { chunks[i] = calloc(64, 1); //512 bits memcpy(chunks[i], inputs[i], BLOCK_SIZE /* chars */ ); /* * chunks[i][chars] = 0x80; //Last 8 chars used for storing length of * input without padding, in big-endian. //Since we only care for one * block, we are safe with just using last 9 bits and 0'ing the rest * * chunk[60] = numBits >> 24; chunk[61] = numBits >> 16; chunks[i][62] = * numBits >> 8; chunks[i][63] = numBits; */ if (addView) memcpy(views[i].x, chunks[i], 64); for (int j = 0; j < 16; j++) { w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16) | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3]; } free(chunks[i]); } uint32_t s0[3], s1[3]; uint32_t t0[3], t1[3]; for (int j = 16; j < 64; j++) { //s0[i] = RIGHTROTATE(w[i][j - 15], 7) ^ RIGHTROTATE(w[i][j - 15], 18) ^ (w[i][j - 15] >> 3); mpc_RIGHTROTATE(w[j - 15], 7, t0); mpc_RIGHTROTATE(w[j - 15], 18, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j - 15], 3, t1); mpc_XOR(t0, t1, s0); //s1[i] = RIGHTROTATE(w[i][j - 2], 17) ^ RIGHTROTATE(w[i][j - 2], 19) ^ (w[i][j - 2] >> 10); mpc_RIGHTROTATE(w[j - 2], 17, t0); mpc_RIGHTROTATE(w[j - 2], 19, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j - 2], 10, t1); mpc_XOR(t0, t1, s1); //w[i][j] = w[i][j - 16] + s0[i] + w[i][j - 7] + s1[i]; mpc_ADD(w[j - 16], s0, t1, randomness, randCount, views, countY); mpc_ADD(w[j - 7], t1, t1, randomness, randCount, views, countY); mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY); } uint32_t a[3] = {hA[0][0], hA[0][1], hA[0][2]}; uint32_t b[3] = {hA[1][0], hA[1][1], hA[1][2]}; uint32_t c[3] = {hA[2][0], hA[2][1], hA[2][2]}; uint32_t d[3] = {hA[3][0], hA[3][1], hA[3][2]}; uint32_t e[3] = {hA[4][0], hA[4][1], hA[4][2]}; uint32_t f[3] = {hA[5][0], hA[5][1], hA[5][2]}; uint32_t g[3] = {hA[6][0], hA[6][1], hA[6][2]}; uint32_t h[3] = {hA[7][0], hA[7][1], hA[7][2]}; uint32_t temp1[3], temp2[3], maj[3]; for (int i = 0; i < 64; i++) { //s1 = RIGHTROTATE(e, 6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); mpc_RIGHTROTATE(e, 6, t0); mpc_RIGHTROTATE(e, 11, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(e, 25, t1); mpc_XOR(t0, t1, s1); //ch = (e & f) ^ ((~e) & g); //temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; //t0 = h + s1 mpc_ADD(h, s1, t0, randomness, randCount, views, countY); mpc_CH(e, f, g, t1, randomness, randCount, views, countY); //t1 = t0 + t1(h + s1 + ch) mpc_ADD(t0, t1, t1, randomness, randCount, views, countY); mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY); mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY); //s0 = RIGHTROTATE(a, 2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); mpc_RIGHTROTATE(a, 2, t0); mpc_RIGHTROTATE(a, 13, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(a, 22, t1); mpc_XOR(t0, t1, s0); mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY); //temp2 = s0 + maj; mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY); memcpy(h, g, sizeof(uint32_t) * 3); memcpy(g, f, sizeof(uint32_t) * 3); memcpy(f, e, sizeof(uint32_t) * 3); //e = d + temp1; mpc_ADD(d, temp1, e, randomness, randCount, views, countY); memcpy(d, c, sizeof(uint32_t) * 3); memcpy(c, b, sizeof(uint32_t) * 3); memcpy(b, a, sizeof(uint32_t) * 3); //a = temp1 + temp2; mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY); } /* * uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, * { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] }, { hA[4],hA[4],hA[4] }, { * 
hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } }; */ uint32_t hHa[8][3]; mpc_ADD(hA[0], a, hHa[0], randomness, randCount, views, countY); mpc_ADD(hA[1], b, hHa[1], randomness, randCount, views, countY); mpc_ADD(hA[2], c, hHa[2], randomness, randCount, views, countY); mpc_ADD(hA[3], d, hHa[3], randomness, randCount, views, countY); mpc_ADD(hA[4], e, hHa[4], randomness, randCount, views, countY); mpc_ADD(hA[5], f, hHa[5], randomness, randCount, views, countY); mpc_ADD(hA[6], g, hHa[6], randomness, randCount, views, countY); mpc_ADD(hA[7], h, hHa[7], randomness, randCount, views, countY); for (int i = 0; i < 8; i++) { hA[i][0] = hHa[i][0]; hA[i][1] = hHa[i][1]; hA[i][2] = hHa[i][2]; } for (int i = 0; i < 8; i++) { mpc_RIGHTSHIFT(hHa[i], 24, t0); results[0][i * 4] = t0[0]; results[1][i * 4] = t0[1]; results[2][i * 4] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 16, t0); results[0][i * 4 + 1] = t0[0]; results[1][i * 4 + 1] = t0[1]; results[2][i * 4 + 1] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 8, t0); results[0][i * 4 + 2] = t0[0]; results[1][i * 4 + 2] = t0[1]; results[2][i * 4 + 2] = t0[2]; results[0][i * 4 + 3] = hHa[i][0]; results[1][i * 4 + 3] = hHa[i][1]; results[2][i * 4 + 3] = hHa[i][2]; } return 0; } int writeToFile(char filename[], void *data, int size, int numItems) { FILE *file; file = fopen(filename, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(data, size, numItems, file); fclose(file); return 0; } int secretShare(unsigned char *input, int numBytes, unsigned char output[3][numBytes]) { if (RAND_bytes(output[0], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } if (RAND_bytes(output[1], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } for (int j = 0; j < numBytes; j++) { output[2][j] = input[j] ^ output[0][j] ^ output[1][j]; } return 0; } int mpc_hmac_sha256(unsigned char *results[3], unsigned char ek[3][BLOCK_SIZE], int numBytes, char *Cha, unsigned char *randomness[3], int *randCount, View views[3], int *countY) { unsigned char shares[3][BLOCK_SIZE]; uint32_t hA[8][3]; int i; unsigned char *innerhash[3], *outerhash[3]; innerhash[0] = malloc(32); innerhash[1] = malloc(32); innerhash[2] = malloc(32); outerhash[0] = malloc(32); outerhash[1] = malloc(32); outerhash[2] = malloc(32); if (strlen(Cha) > MSG_SIZE) { printf("Input too long, aborting!"); return -1; } for (i = 0; i < 8; i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; memset(shares[0], 0, sizeof(shares[0])); memset(shares[1], 0, sizeof(shares[1])); memset(shares[2], 0, sizeof(shares[2])); for (i = 0; i < BLOCK_SIZE; i++) { shares[0][i] = ek[0][i] ^ 0x36; shares[1][i] = ek[1][i] ^ 0x36; shares[2][i] = ek[2][i] ^ 0x36; } mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY); memset(shares[0], 0, sizeof(shares[0])); memset(shares[1], 0, sizeof(shares[1])); memset(shares[2], 0, sizeof(shares[2])); for (i = 0; i < strlen(Cha); i++) { shares[0][i] = Cha[i]; shares[1][i] = Cha[i]; shares[2][i] = Cha[i]; } shares[0][strlen(Cha)] = shares[1][strlen(Cha)] = shares[2][strlen(Cha)] = 0x80; shares[0][61] = shares[1][61] = shares[2][61] = (((strlen(Cha) * 8) + 512) >> 16) & 0xFF; shares[0][62] = shares[1][62] = shares[2][62] = (((strlen(Cha) * 8) + 512) >> 8) & 0xFF; shares[0][63] = shares[1][63] = shares[2][63] = ((strlen(Cha) * 8) + 512) & 0xFF; mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY); for (i = 0; i < 8; i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; memset(shares, 0, 3 * BLOCK_SIZE); for (i = 0; i < BLOCK_SIZE; i++) 
{ shares[0][i] = ek[0][i] ^ 0x5c; shares[1][i] = ek[1][i] ^ 0x5c; shares[2][i] = ek[2][i] ^ 0x5c; } mpc_sha256(outerhash, shares, 512, 0, hA, randomness, randCount, views, countY); memset(shares, 0, 3 * BLOCK_SIZE); for (i = 0; i < 32; i++) { shares[0][i] = innerhash[0][i]; shares[1][i] = innerhash[1][i]; shares[2][i] = innerhash[2][i]; } shares[0][32] = shares[1][32] = shares[2][32] = 0x80; shares[0][62] = shares[1][62] = shares[2][62] = 3; mpc_sha256(results, shares, 512, 0, hA, randomness, randCount, views, countY); free(innerhash[0]); free(innerhash[1]); free(innerhash[2]); free(outerhash[0]); free(outerhash[1]); free(outerhash[2]); return 0; } a commit(int numBytes, unsigned char shares[3][BLOCK_SIZE], char *Cha, unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) { unsigned char *hashes[3]; hashes[0] = malloc(32); hashes[1] = malloc(32); hashes[2] = malloc(32); int *randCount = calloc(1, sizeof(int)); int *countY = calloc(1, sizeof(int)); uint32_t hA[8][3]; int i; for (i = 0; i < 8; i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; *countY = 0; shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = 0x80; shares[0][62] = shares[1][62] = shares[2][62] = ((numBytes * 8) >> 8) & 0xFF; shares[0][63] = shares[1][63] = shares[2][63] = (numBytes * 8) & 0xFF; mpc_sha256(hashes, shares, numBytes * 8, 1, hA, randomness, randCount, views, countY); unsigned char *hmac[3]; hmac[0] = malloc(32); hmac[1] = malloc(32); hmac[2] = malloc(32); shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = shares[0][62] = shares[1][62] = shares[2][62] = shares[0][63] = shares[1][63] = shares[2][63] = 0; mpc_hmac_sha256(hmac, shares, numBytes, Cha, randomness, randCount, views, countY); //Explicitly add y to view free(randCount); for (int i = 0; i < 8; i++) { views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16) | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3]; views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16) | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3]; views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16) | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3]; *countY += 1; } for (int i = 0; i < 8; i++) { views[0].y[*countY] = (hmac[0][i * 4] << 24) | (hmac[0][i * 4 + 1] << 16) | (hmac[0][i * 4 + 2] << 8) | hmac[0][i * 4 + 3]; views[1].y[*countY] = (hmac[1][i * 4] << 24) | (hmac[1][i * 4 + 1] << 16) | (hmac[1][i * 4 + 2] << 8) | hmac[1][i * 4 + 3]; views[2].y[*countY] = (hmac[2][i * 4] << 24) | (hmac[2][i * 4 + 1] << 16) | (hmac[2][i * 4 + 2] << 8) | hmac[2][i * 4 + 3]; *countY += 1; } free(countY); free(hashes[0]); free(hashes[1]); free(hashes[2]); free(hmac[0]); free(hmac[1]); free(hmac[2]); uint32_t *result11 = malloc(32); uint32_t *result21 = malloc(32); output(views[0], result11, result21); uint32_t *result12 = malloc(32); uint32_t *result22 = malloc(32); output(views[1], result12, result22); uint32_t *result13 = malloc(32); uint32_t *result23 = malloc(32); output(views[2], result13, result23); a a; memcpy(a.yp1[0], result11, 32); memcpy(a.yp1[1], result12, 32); memcpy(a.yp1[2], result13, 32); memcpy(a.yp2[0], result21, 32); memcpy(a.yp2[1], result22, 32); memcpy(a.yp2[2], result23, 32); free(result11); free(result12); free(result13); free(result21); free(result22); free(result23); return a; } z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) { z z; memcpy(z.ke, keys[e], 16); memcpy(z.ke1, keys[(e + 1) % 3], 16); z.ve = views[e]; z.ve1 = views[(e + 1) % 
3]; memcpy(z.re, rs[e], 4); memcpy(z.re1, rs[(e + 1) % 3], 4); return z; } int GetNextSelected(int size, unsigned char *data, int *dataPtr) { int value = 0; int modulo = size; while (size > 0) { value <<= 8; value += (int)data[*dataPtr]; size >>= 8; (*dataPtr)++; } if (!(value & 0x01)) //will return odd number value++; return (int)value % modulo; } Merkle * BuildMerkleTree(int NumRounds, z * zs) { int i; Merkle *tempNode; Merkle *startNode = NULL; Merkle *childNode; Merkle *prevNode; int done = 0; int odd = 0; unsigned char datablock[64]; if ((!zs) || (NumRounds < 2)) return NULL; prevNode = NULL; for (i = 0; i < NumRounds; i++) { tempNode = malloc(sizeof(Merkle)); if (i == 0) startNode = tempNode; sha256(tempNode->data, (unsigned char *)&(zs[i]), sizeof(z) * 8); tempNode->parent = NULL; tempNode->type = 0; tempNode->next = NULL; tempNode->previous = prevNode; if (prevNode) prevNode->next = tempNode; if (!odd) { tempNode->sibling = NULL; odd = 1; } else { prevNode->sibling = tempNode; tempNode->sibling = prevNode; odd = 0; } prevNode = tempNode; } while (!done) { childNode = startNode; while (childNode->parent) childNode = childNode->parent; if (!childNode->sibling) { done = 1; continue; } odd = 0; prevNode = NULL; while (childNode != NULL) { tempNode = malloc(sizeof(Merkle)); tempNode->type = 1; childNode->parent = tempNode; tempNode->previous = prevNode; if (prevNode) prevNode->next = tempNode; tempNode->next = NULL; tempNode->parent = NULL; if (!odd) { tempNode->sibling = NULL; odd = 1; } else { prevNode->sibling = tempNode; tempNode->sibling = prevNode; odd = 0; } if (childNode->sibling) { childNode->sibling->parent = tempNode; memcpy(datablock, childNode->data, 32); memcpy(&(datablock[32]), childNode->sibling->data, 32); sha256(tempNode->data, datablock, 64 * 8); childNode = childNode->sibling->next; } else { memset(datablock, 0, sizeof(datablock)); memcpy(datablock, childNode->data, 32); sha256(tempNode->data, datablock, 64 * 8); childNode = childNode->sibling; } prevNode = tempNode; } } return startNode; } void DestroyMerkleTree(Merkle * startNode) { Merkle *tempNode; if (startNode->parent) DestroyMerkleTree(startNode->parent); startNode->parent = NULL; while (startNode) { tempNode = startNode->next; free(startNode); startNode = tempNode; } return; } #define NUM_LOOPS 1 int main(int argc, char *argv[]) { setbuf(stdout, NULL); srand((unsigned)time(NULL)); init_EVP(); openmp_thread_setup(); char CHALLENGE[BLOCK_SIZE]; char ek[BLOCK_SIZE]; //eval key is 447 bits // if (argc != 4) { printf("Usage: %s <number of rounds (e.g. 
20, 40, 60, 80, 100)> <challenge (Max %d char)> <eval key (Max %d char)>\n", argv[0], MSG_SIZE, MSG_SIZE); return -1; } NUM_ROUNDS = atoi(argv[1]); if ((NUM_ROUNDS & 0x01) || (NUM_ROUNDS < 4)) { printf("Number of rounds should be even and > 4\n"); return -1; } unsigned char garbage[4]; if (RAND_bytes(garbage, 4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } memset(CHALLENGE, 0, sizeof(CHALLENGE)); strncpy(CHALLENGE, argv[2], MSG_SIZE); //55 is max length as we only support 447 bits = 55.875 bytes memset(ek, 0, sizeof(ek)); strncpy(ek, argv[3], MSG_SIZE); int i = strlen(ek); printf("ek length: %d\n", i); printf("Iterations of ZKBdf: %d\n", NUM_ROUNDS); unsigned char input[BLOCK_SIZE]; //512 bits memset(input, 0, sizeof(input)); memcpy(input, ek, sizeof(input)); struct timeval begin, delta; gettimeofday(&begin, NULL); unsigned char rs[NUM_ROUNDS][3][4]; unsigned char keys[NUM_ROUNDS][3][16]; a as[NUM_ROUNDS]; View localViews[NUM_ROUNDS][3]; int totalCrypto = 0; z *zs; for (int loops = 0; loops < NUM_LOOPS; loops++) { //Generating keys if (RAND_bytes((unsigned char *)keys, NUM_ROUNDS * 3 * 16) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } if (RAND_bytes((unsigned char *)rs, NUM_ROUNDS * 3 * 4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } //Sharing secrets unsigned char shares[NUM_ROUNDS][3][BLOCK_SIZE]; memset(shares, 0, NUM_ROUNDS * 3 * BLOCK_SIZE); if (RAND_bytes((unsigned char *)shares, NUM_ROUNDS * 3 * BLOCK_SIZE) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } for (int k = 0; k < NUM_ROUNDS; k++) { for (int j = 0; j < i; j++) { shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j]; } for (int j = i; j < BLOCK_SIZE; j++) { shares[k][2][j] = shares[k][0][j] = shares[k][1][j] = 0; } } unsigned char *randomness[NUM_ROUNDS][3]; int es[NUM_ROUNDS]; uint32_t finalHash1[8]; uint32_t finalHash2[8]; zs = malloc(sizeof(z) * NUM_ROUNDS); int r; for (r = 0; r < NUM_ROUNDS; r++) { unsigned char plaintext[16]; memset(plaintext, 0x30, sizeof(plaintext)); if (r != 0) { SHA256_CTX ctx; unsigned char prevroundhash[SHA256_DIGEST_LENGTH]; SHA256_Init(&ctx); SHA256_Update(&ctx, &(zs[r - 1]), sizeof(z)); SHA256_Final(prevroundhash, &ctx); memcpy(plaintext, prevroundhash, sizeof(plaintext)); } //Generating randomness // //for (int k = 0; k < (NUM_ROUNDS); k++) { for (int j = 0; j < 3; j++) { randomness[r][j] = malloc((ySize * 4) * sizeof(unsigned char)); getAllRandomness(keys[r][j], plaintext, randomness[r][j]); } // } //Running MPC - SHA2 // //for (int k = 0; k < NUM_ROUNDS; k++) { as[r] = commit(i, shares[r], CHALLENGE, randomness[r], rs[r], localViews[r]); for (int j = 0; j < 3; j++) { free(randomness[r][j]); } // } //Committing // //for (int k = 0; k < (NUM_ROUNDS); k++) { unsigned char hash1[SHA256_DIGEST_LENGTH]; memset(hash1, 0, sizeof(hash1)); H(keys[r][0], localViews[r][0], rs[r][0], hash1); memcpy(as[r].h[0], &hash1, 32); H(keys[r][1], localViews[r][1], rs[r][1], hash1); memcpy(as[r].h[1], &hash1, 32); H(keys[r][2], localViews[r][2], rs[r][2], hash1); memcpy(as[r].h[2], &hash1, 32); // } //Generating E if (r == 0) { for (int j = 0; j < 8; j++) { finalHash1[j] = as[0].yp1[0][j] ^ as[0].yp1[1][j] ^ as[0].yp1[2][j]; finalHash2[j] = as[0].yp2[0][j] ^ as[0].yp2[1][j] ^ as[0].yp2[2][j]; } printf("output H(ek) = "); for (int i = 0; i < 8; i++) { printf("%02X", finalHash1[i]); } printf("\n"); printf("output HMAC(ek,Challenge) = "); for (int i = 0; i < 8; i++) { printf("%02X", finalHash2[i]); } printf("\n"); } 
H3(finalHash1, finalHash2, &(as[r]), /* NUM_ROUNDS */ 1, &(es[r])); //Packing Z // //for (int i = 0; i < (NUM_ROUNDS); i++) { zs[r] = prove(es[r], keys[r], rs[r], localViews[r]); // } } } //now to extract the PCP proofs int PCProunds = (int)ceil(log(NUM_ROUNDS) / log(2)); int Totalselected = 0; unsigned char PCPselected[NUM_ROUNDS]; Merkle *startNode = NULL; Merkle *currNode = NULL; Merkle *tempNode = NULL; Merkle *rootNode = NULL; unsigned char MerkleHash[64]; unsigned char MerkleBranch[(32 * 2 * PCProunds) + 32]; int MerkleHashPtr; int Nextselected; startNode = BuildMerkleTree(NUM_ROUNDS, zs); rootNode = startNode; while (rootNode->parent) rootNode = rootNode->parent; memset(MerkleHash, 0, sizeof(MerkleHash)); memcpy(&(MerkleHash[32]), rootNode->data, 32); sha256(MerkleHash, MerkleHash, 64 * 8); MerkleHashPtr = 0; memset(PCPselected, 0, sizeof(PCPselected)); while (Totalselected < PCProunds) { Nextselected = GetNextSelected(NUM_ROUNDS, MerkleHash, &MerkleHashPtr); if (!PCPselected[Nextselected]) { PCPselected[Nextselected] = 1; Totalselected++; } if (MerkleHashPtr >= 32) { sha256(MerkleHash, MerkleHash, 64 * 8); MerkleHashPtr = 0; } } gettimeofday(&delta, NULL); unsigned long inMilli = (delta.tv_sec - begin.tv_sec) * 1000000 + (delta.tv_usec - begin.tv_usec); inMilli /= 1000; //Writing ZKBoo proofs to file FILE * file; char outputFile[3 * sizeof(int) + 8]; sprintf(outputFile, "out%i.bin", NUM_ROUNDS); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(as, sizeof(a), NUM_ROUNDS, file); fwrite(zs, sizeof(z), NUM_ROUNDS, file); fclose(file); //writing PCP proofs to file sprintf(outputFile, "pcp%i-%i.bin", NUM_ROUNDS, PCProunds); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } currNode = startNode; fwrite(rootNode->data, 32, 1, file); //write the root node first tempNode = startNode; for (int k = 0; k < NUM_ROUNDS; k++) { fwrite(tempNode->data, 32, 1, file); tempNode = tempNode->next; } for (int j = 0; j < NUM_ROUNDS; j++) { if (PCPselected[j]) { //print current node tempNode = currNode; memset(MerkleBranch, 0, sizeof(MerkleBranch)); MerkleHashPtr = 0; while (tempNode->parent != NULL) //write the current node { if (tempNode->sibling) { if (tempNode->sibling == tempNode->next) { memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->data, 32); MerkleHashPtr += 32; memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->sibling->data, 32); MerkleHashPtr += 32; } else { memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->sibling->data, 32); MerkleHashPtr += 32; memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->data, 32); MerkleHashPtr += 32; } } else { memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->data, 32); MerkleHashPtr += 64; } tempNode = tempNode->parent; } fwrite(MerkleBranch, MerkleHashPtr, 1, file); fwrite(&(as[j]), sizeof(a), 1, file); fwrite(&(zs[j]), sizeof(z), 1, file); fwrite(&(as[j - 1]), sizeof(a), 1, file); fwrite(&(zs[j - 1]), sizeof(z), 1, file); } currNode = currNode->next; } DestroyMerkleTree(startNode); fclose(file); free(zs); printf("Total time taken for %d loops: %d mili-seconds\n", NUM_LOOPS, inMilli); printf("Time per loop: %d mili-seconds\n", inMilli / NUM_LOOPS); printf("\n"); printf("zkboo Proof output to file %s", outputFile); openmp_thread_cleanup(); cleanup_EVP(); return EXIT_SUCCESS; }
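/* Reference sketch (not part of the prover): the hard-coded length bytes in
 * mpc_hmac_sha256 above encode SHA-256 message lengths in bits. The inner
 * hash covers the 64-byte ipad block plus the challenge, i.e.
 * 512 + 8*strlen(Cha) bits (bytes 61..63); the outer hash covers the 64-byte
 * opad block plus the 32-byte inner digest, i.e. 96 bytes = 768 = 0x0300
 * bits, hence shares[62] = 3. A plain (non-MPC) HMAC with the same
 * one-block-key layout, using the OpenSSL SHA256_* API this file already
 * uses elsewhere; the helper name is illustrative only: */
#include <stddef.h>
#include <openssl/sha.h>

static void hmac_sha256_ref(const unsigned char key[64],
                            const unsigned char *msg, size_t msglen,
                            unsigned char out[32])
{
    unsigned char pad[64], inner[32];
    SHA256_CTX ctx;
    int i;

    for (i = 0; i < 64; i++) pad[i] = key[i] ^ 0x36;   /* ipad block */
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, pad, 64);                      /* first 512 bits */
    SHA256_Update(&ctx, msg, msglen);                  /* + 8*msglen bits */
    SHA256_Final(inner, &ctx);                         /* inner digest */

    for (i = 0; i < 64; i++) pad[i] = key[i] ^ 0x5c;   /* opad block */
    SHA256_Init(&ctx);
    SHA256_Update(&ctx, pad, 64);
    SHA256_Update(&ctx, inner, 32);                    /* 96 B = 768 bits total */
    SHA256_Final(out, &ctx);
}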
/* * Name: zkbdf_eval.c Author: Tan Teik Guan Description: Eval function for VDF * realization using ZKBoo. Modified from MPC_SHA256.c */ /* * * ============================================================================ * Name : MPC_SHA256.c Author : Sobuno Version : 0.1 * Description : MPC SHA256 for one block only * =========================================================================== * = * */ #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "shared.h" #include <math.h> #include "omp.h" #define CH(e,f,g) ((e & f) ^ ((~e) & g)) int totalRandom = 0; int totalSha = 0; int totalSS = 0; int totalHash = 0; int NUM_ROUNDS = 100; uint32_t rand32() { uint32_t x; x = rand() & 0xff; x |= (rand() & 0xff) << 8; x |= (rand() & 0xff) << 16; x |= (rand() & 0xff) << 24; return x; } void printbits(uint32_t n) { if (n) { printbits(n >> 1); printf("%d", n & 1); } } void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) { z[0] = x[0] ^ y[0]; z[1] = x[1] ^ y[1]; z[2] = x[2] ^ y[2]; } void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint32_t t[3] = {0}; t[0] = (x[0] & y[1]) ^ (x[1] & y[0]) ^ (x[0] & y[0]) ^ r[0] ^ r[1]; t[1] = (x[1] & y[2]) ^ (x[2] & y[1]) ^ (x[1] & y[1]) ^ r[1] ^ r[2]; t[2] = (x[2] & y[0]) ^ (x[0] & y[2]) ^ (x[2] & y[2]) ^ r[2] ^ r[0]; z[0] = t[0]; z[1] = t[1]; z[2] = t[2]; views[0].y[*countY] = z[0]; views[1].y[*countY] = z[1]; views[2].y[*countY] = z[2]; (*countY)++; } void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) { z[0] = ~x[0]; z[1] = ~x[1]; z[2] = ~x[2]; } void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t c[3] = {0}; uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for (int i = 0; i < 31; i++) { a[0] = GETBIT(x[0] ^ c[0], i); a[1] = GETBIT(x[1] ^ c[1], i); a[2] = GETBIT(x[2] ^ c[2], i); b[0] = GETBIT(y[0] ^ c[0], i); b[1] = GETBIT(y[1] ^ c[1], i); b[2] = GETBIT(y[2] ^ c[2], i); t = (a[0] & b[1]) ^ (a[1] & b[0]) ^ GETBIT(r[1], i); SETBIT(c[0], i + 1, t ^ (a[0] & b[0]) ^ GETBIT(c[0], i) ^ GETBIT(r[0], i)); t = (a[1] & b[2]) ^ (a[2] & b[1]) ^ GETBIT(r[2], i); SETBIT(c[1], i + 1, t ^ (a[1] & b[1]) ^ GETBIT(c[1], i) ^ GETBIT(r[1], i)); t = (a[2] & b[0]) ^ (a[0] & b[2]) ^ GETBIT(r[0], i); SETBIT(c[2], i + 1, t ^ (a[2] & b[2]) ^ GETBIT(c[2], i) ^ GETBIT(r[2], i)); } z[0] = x[0] ^ y[0] ^ c[0]; z[1] = x[1] ^ y[1] ^ c[1]; z[2] = x[2] ^ y[2] ^ c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t c[3] = {0}; uint32_t r[3] = {getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount)}; *randCount += 4; uint8_t a[3], b[3]; uint8_t t; for (int i = 0; i < 31; i++) { a[0] = GETBIT(x[0] ^ c[0], i); a[1] = GETBIT(x[1] ^ c[1], i); a[2] = GETBIT(x[2] ^ c[2], i); b[0] = GETBIT(y ^ c[0], i); b[1] = GETBIT(y ^ c[1], i); b[2] = GETBIT(y ^ c[2], i); t = (a[0] & b[1]) ^ (a[1] & b[0]) ^ GETBIT(r[1], i); 
SETBIT(c[0], i + 1, t ^ (a[0] & b[0]) ^ GETBIT(c[0], i) ^ GETBIT(r[0], i)); t = (a[1] & b[2]) ^ (a[2] & b[1]) ^ GETBIT(r[2], i); SETBIT(c[1], i + 1, t ^ (a[1] & b[1]) ^ GETBIT(c[1], i) ^ GETBIT(r[1], i)); t = (a[2] & b[0]) ^ (a[0] & b[2]) ^ GETBIT(r[0], i); SETBIT(c[2], i + 1, t ^ (a[2] & b[2]) ^ GETBIT(c[2], i) ^ GETBIT(r[2], i)); } z[0] = x[0] ^ y ^ c[0]; z[1] = x[1] ^ y ^ c[1]; z[2] = x[2] ^ y ^ c[2]; views[0].y[*countY] = c[0]; views[1].y[*countY] = c[1]; views[2].y[*countY] = c[2]; *countY += 1; } int sha256(unsigned char *result, unsigned char *input, int numBits) { uint32_t hA[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; int remainingBits = numBits; int chars; int i; while (remainingBits >= 0) { if (remainingBits > 447) { chars = 64; remainingBits -= 512; } else { chars = remainingBits >> 3; remainingBits = -1; } unsigned char *chunk = calloc(64, 1); //512 bits memcpy(chunk, input, chars); input += chars; if (chars < 64) { chunk[chars] = 0x80; chunk[60] = numBits >> 24; chunk[61] = numBits >> 16; chunk[62] = numBits >> 8; chunk[63] = numBits; } uint32_t w[64]; for (i = 0; i < 16; i++) { w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16) | (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3]; } uint32_t s0, s1; for (i = 16; i < 64; i++) { s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18) ^ (w[i - 15] >> 3); s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19) ^ (w[i - 2] >> 10); w[i] = w[i - 16] + s0 + w[i - 7] + s1; } uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj; a = hA[0]; b = hA[1]; c = hA[2]; d = hA[3]; e = hA[4]; f = hA[5]; g = hA[6]; h = hA[7]; for (i = 0; i < 64; i++) { s1 = RIGHTROTATE(e, 6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; s0 = RIGHTROTATE(a, 2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); maj = (a & (b ^ c)) ^ (b & c); temp2 = s0 + maj; h = g; g = f; f = e; e = d + temp1; d = c; c = b; b = a; a = temp1 + temp2; } hA[0] += a; hA[1] += b; hA[2] += c; hA[3] += d; hA[4] += e; hA[5] += f; hA[6] += g; hA[7] += h; } for (i = 0; i < 8; i++) { result[i * 4] = (hA[i] >> 24); result[i * 4 + 1] = (hA[i] >> 16); result[i * 4 + 2] = (hA[i] >> 8); result[i * 4 + 3] = hA[i]; } return 0; } void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) { z[0] = RIGHTROTATE(x[0], i); z[1] = RIGHTROTATE(x[1], i); z[2] = RIGHTROTATE(x[2], i); } void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) { z[0] = x[0] >> i; z[1] = x[1] >> i; z[2] = x[2] >> i; } void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t t0[3]; uint32_t t1[3]; mpc_XOR(a, b, t0); mpc_XOR(a, c, t1); mpc_AND(t0, t1, z, randomness, randCount, views, countY); mpc_XOR(z, a, z); } void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { uint32_t t0[3]; //e & (f ^ g) ^ g mpc_XOR(f, g, t0); mpc_AND(e, t0, t0, randomness, randCount, views, countY); mpc_XOR(t0, g, z); } int mpc_sha256(unsigned char *results[3], unsigned char inputs[3][BLOCK_SIZE], int numBits, int addView, uint32_t hA[8][3], unsigned char *randomness[3], int *randCount, View views[3], int *countY) { /* * if (numBits > 447) { printf("Input too long, aborting!"); return -1; } */ int chars = numBits >> 3; unsigned char *chunks[3]; uint32_t w[64][3]; uint32_t msg[MSG_SIZE / 4]; /* * if (addMsg) { for (int j=0;j<(numBits/32);j++) { msg[j] = * (addMsg[j*4]<<24) | 
(addMsg[j*4+1]<<16) | (addMsg[j*4+2] << 8) | * (addMsg[j*4+3]); * * } * } */ for (int i = 0; i < 64; i++) { w[i][0] = w[i][1] = w[i][2] = 0; } for (int i = 0; i < 3; i++) { chunks[i] = calloc(64, 1); //512 bits memcpy(chunks[i], inputs[i], BLOCK_SIZE /* chars */ ); /* * chunks[i][chars] = 0x80; //Last 8 chars used for storing length of * input without padding, in big-endian. //Since we only care for one * block, we are safe with just using last 9 bits and 0'ing the rest * * chunk[60] = numBits >> 24; chunk[61] = numBits >> 16; chunks[i][62] = * numBits >> 8; chunks[i][63] = numBits; */ if (addView) memcpy(views[i].x, chunks[i], 64); for (int j = 0; j < 16; j++) { w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16) | (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3]; } free(chunks[i]); } uint32_t s0[3], s1[3]; uint32_t t0[3], t1[3]; for (int j = 16; j < 64; j++) { //s0[i] = RIGHTROTATE(w[i][j - 15], 7) ^ RIGHTROTATE(w[i][j - 15], 18) ^ (w[i][j - 15] >> 3); mpc_RIGHTROTATE(w[j - 15], 7, t0); mpc_RIGHTROTATE(w[j - 15], 18, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j - 15], 3, t1); mpc_XOR(t0, t1, s0); //s1[i] = RIGHTROTATE(w[i][j - 2], 17) ^ RIGHTROTATE(w[i][j - 2], 19) ^ (w[i][j - 2] >> 10); mpc_RIGHTROTATE(w[j - 2], 17, t0); mpc_RIGHTROTATE(w[j - 2], 19, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTSHIFT(w[j - 2], 10, t1); mpc_XOR(t0, t1, s1); //w[i][j] = w[i][j - 16] + s0[i] + w[i][j - 7] + s1[i]; mpc_ADD(w[j - 16], s0, t1, randomness, randCount, views, countY); mpc_ADD(w[j - 7], t1, t1, randomness, randCount, views, countY); mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY); } uint32_t a[3] = {hA[0][0], hA[0][1], hA[0][2]}; uint32_t b[3] = {hA[1][0], hA[1][1], hA[1][2]}; uint32_t c[3] = {hA[2][0], hA[2][1], hA[2][2]}; uint32_t d[3] = {hA[3][0], hA[3][1], hA[3][2]}; uint32_t e[3] = {hA[4][0], hA[4][1], hA[4][2]}; uint32_t f[3] = {hA[5][0], hA[5][1], hA[5][2]}; uint32_t g[3] = {hA[6][0], hA[6][1], hA[6][2]}; uint32_t h[3] = {hA[7][0], hA[7][1], hA[7][2]}; uint32_t temp1[3], temp2[3], maj[3]; for (int i = 0; i < 64; i++) { //s1 = RIGHTROTATE(e, 6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25); mpc_RIGHTROTATE(e, 6, t0); mpc_RIGHTROTATE(e, 11, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(e, 25, t1); mpc_XOR(t0, t1, s1); //ch = (e & f) ^ ((~e) & g); //temp1 = h + s1 + CH(e, f, g) + k[i] + w[i]; //t0 = h + s1 mpc_ADD(h, s1, t0, randomness, randCount, views, countY); mpc_CH(e, f, g, t1, randomness, randCount, views, countY); //t1 = t0 + t1(h + s1 + ch) mpc_ADD(t0, t1, t1, randomness, randCount, views, countY); mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY); mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY); //s0 = RIGHTROTATE(a, 2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22); mpc_RIGHTROTATE(a, 2, t0); mpc_RIGHTROTATE(a, 13, t1); mpc_XOR(t0, t1, t0); mpc_RIGHTROTATE(a, 22, t1); mpc_XOR(t0, t1, s0); mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY); //temp2 = s0 + maj; mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY); memcpy(h, g, sizeof(uint32_t) * 3); memcpy(g, f, sizeof(uint32_t) * 3); memcpy(f, e, sizeof(uint32_t) * 3); //e = d + temp1; mpc_ADD(d, temp1, e, randomness, randCount, views, countY); memcpy(d, c, sizeof(uint32_t) * 3); memcpy(c, b, sizeof(uint32_t) * 3); memcpy(b, a, sizeof(uint32_t) * 3); //a = temp1 + temp2; mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY); } /* * uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0] }, { hA[1],hA[1],hA[1] }, * { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] }, { hA[4],hA[4],hA[4] }, { * 
hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } }; */ uint32_t hHa[8][3]; mpc_ADD(hA[0], a, hHa[0], randomness, randCount, views, countY); mpc_ADD(hA[1], b, hHa[1], randomness, randCount, views, countY); mpc_ADD(hA[2], c, hHa[2], randomness, randCount, views, countY); mpc_ADD(hA[3], d, hHa[3], randomness, randCount, views, countY); mpc_ADD(hA[4], e, hHa[4], randomness, randCount, views, countY); mpc_ADD(hA[5], f, hHa[5], randomness, randCount, views, countY); mpc_ADD(hA[6], g, hHa[6], randomness, randCount, views, countY); mpc_ADD(hA[7], h, hHa[7], randomness, randCount, views, countY); for (int i = 0; i < 8; i++) { hA[i][0] = hHa[i][0]; hA[i][1] = hHa[i][1]; hA[i][2] = hHa[i][2]; } for (int i = 0; i < 8; i++) { mpc_RIGHTSHIFT(hHa[i], 24, t0); results[0][i * 4] = t0[0]; results[1][i * 4] = t0[1]; results[2][i * 4] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 16, t0); results[0][i * 4 + 1] = t0[0]; results[1][i * 4 + 1] = t0[1]; results[2][i * 4 + 1] = t0[2]; mpc_RIGHTSHIFT(hHa[i], 8, t0); results[0][i * 4 + 2] = t0[0]; results[1][i * 4 + 2] = t0[1]; results[2][i * 4 + 2] = t0[2]; results[0][i * 4 + 3] = hHa[i][0]; results[1][i * 4 + 3] = hHa[i][1]; results[2][i * 4 + 3] = hHa[i][2]; } return 0; } int writeToFile(char filename[], void *data, int size, int numItems) { FILE *file; file = fopen(filename, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(data, size, numItems, file); fclose(file); return 0; } int secretShare(unsigned char *input, int numBytes, unsigned char output[3][numBytes]) { if (RAND_bytes(output[0], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } if (RAND_bytes(output[1], numBytes) != 1) { printf("RAND_bytes failed crypto, aborting\n"); } for (int j = 0; j < numBytes; j++) { output[2][j] = input[j] ^ output[0][j] ^ output[1][j]; } return 0; } int mpc_hmac_sha256(unsigned char *results[3], unsigned char ek[3][BLOCK_SIZE], int numBytes, char *Cha, unsigned char *randomness[3], int *randCount, View views[3], int *countY) { unsigned char shares[3][BLOCK_SIZE]; uint32_t hA[8][3]; int i; unsigned char *innerhash[3], *outerhash[3]; innerhash[0] = malloc(32); innerhash[1] = malloc(32); innerhash[2] = malloc(32); outerhash[0] = malloc(32); outerhash[1] = malloc(32); outerhash[2] = malloc(32); if (strlen(Cha) > MSG_SIZE) { printf("Input too long, aborting!"); return -1; } for (i = 0; i < 8; i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; memset(shares[0], 0, sizeof(shares[0])); memset(shares[1], 0, sizeof(shares[1])); memset(shares[2], 0, sizeof(shares[2])); for (i = 0; i < BLOCK_SIZE; i++) { shares[0][i] = ek[0][i] ^ 0x36; shares[1][i] = ek[1][i] ^ 0x36; shares[2][i] = ek[2][i] ^ 0x36; } mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY); memset(shares[0], 0, sizeof(shares[0])); memset(shares[1], 0, sizeof(shares[1])); memset(shares[2], 0, sizeof(shares[2])); for (i = 0; i < strlen(Cha); i++) { shares[0][i] = Cha[i]; shares[1][i] = Cha[i]; shares[2][i] = Cha[i]; } shares[0][strlen(Cha)] = shares[1][strlen(Cha)] = shares[2][strlen(Cha)] = 0x80; shares[0][61] = shares[1][61] = shares[2][61] = (((strlen(Cha) * 8) + 512) >> 16) & 0xFF; shares[0][62] = shares[1][62] = shares[2][62] = (((strlen(Cha) * 8) + 512) >> 8) & 0xFF; shares[0][63] = shares[1][63] = shares[2][63] = ((strlen(Cha) * 8) + 512) & 0xFF; mpc_sha256(innerhash, shares, 512, 0, hA, randomness, randCount, views, countY); for (i = 0; i < 8; i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; memset(shares, 0, 3 * BLOCK_SIZE); for (i = 0; i < BLOCK_SIZE; i++) 
{ shares[0][i] = ek[0][i] ^ 0x5c; shares[1][i] = ek[1][i] ^ 0x5c; shares[2][i] = ek[2][i] ^ 0x5c; } mpc_sha256(outerhash, shares, 512, 0, hA, randomness, randCount, views, countY); memset(shares, 0, 3 * BLOCK_SIZE); for (i = 0; i < 32; i++) { shares[0][i] = innerhash[0][i]; shares[1][i] = innerhash[1][i]; shares[2][i] = innerhash[2][i]; } shares[0][32] = shares[1][32] = shares[2][32] = 0x80; shares[0][62] = shares[1][62] = shares[2][62] = 3; mpc_sha256(results, shares, 512, 0, hA, randomness, randCount, views, countY); free(innerhash[0]); free(innerhash[1]); free(innerhash[2]); free(outerhash[0]); free(outerhash[1]); free(outerhash[2]); return 0; } a commit(int numBytes, unsigned char shares[3][BLOCK_SIZE], char *Cha, unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) { unsigned char *hashes[3]; hashes[0] = malloc(32); hashes[1] = malloc(32); hashes[2] = malloc(32); int *randCount = calloc(1, sizeof(int)); int *countY = calloc(1, sizeof(int)); uint32_t hA[8][3]; int i; for (i = 0; i < 8; i++) hA[i][0] = hA[i][1] = hA[i][2] = ihA[i]; *countY = 0; shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = 0x80; shares[0][62] = shares[1][62] = shares[2][62] = ((numBytes * 8) >> 8) & 0xFF; shares[0][63] = shares[1][63] = shares[2][63] = (numBytes * 8) & 0xFF; mpc_sha256(hashes, shares, numBytes * 8, 1, hA, randomness, randCount, views, countY); unsigned char *hmac[3]; hmac[0] = malloc(32); hmac[1] = malloc(32); hmac[2] = malloc(32); shares[0][numBytes] = shares[1][numBytes] = shares[2][numBytes] = shares[0][62] = shares[1][62] = shares[2][62] = shares[0][63] = shares[1][63] = shares[2][63] = 0; mpc_hmac_sha256(hmac, shares, numBytes, Cha, randomness, randCount, views, countY); //Explicitly add y to view free(randCount); for (int i = 0; i < 8; i++) { views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16) | (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3]; views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16) | (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3]; views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16) | (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3]; *countY += 1; } for (int i = 0; i < 8; i++) { views[0].y[*countY] = (hmac[0][i * 4] << 24) | (hmac[0][i * 4 + 1] << 16) | (hmac[0][i * 4 + 2] << 8) | hmac[0][i * 4 + 3]; views[1].y[*countY] = (hmac[1][i * 4] << 24) | (hmac[1][i * 4 + 1] << 16) | (hmac[1][i * 4 + 2] << 8) | hmac[1][i * 4 + 3]; views[2].y[*countY] = (hmac[2][i * 4] << 24) | (hmac[2][i * 4 + 1] << 16) | (hmac[2][i * 4 + 2] << 8) | hmac[2][i * 4 + 3]; *countY += 1; } free(countY); free(hashes[0]); free(hashes[1]); free(hashes[2]); free(hmac[0]); free(hmac[1]); free(hmac[2]); uint32_t *result11 = malloc(32); uint32_t *result21 = malloc(32); output(views[0], result11, result21); uint32_t *result12 = malloc(32); uint32_t *result22 = malloc(32); output(views[1], result12, result22); uint32_t *result13 = malloc(32); uint32_t *result23 = malloc(32); output(views[2], result13, result23); a a; memcpy(a.yp1[0], result11, 32); memcpy(a.yp1[1], result12, 32); memcpy(a.yp1[2], result13, 32); memcpy(a.yp2[0], result21, 32); memcpy(a.yp2[1], result22, 32); memcpy(a.yp2[2], result23, 32); free(result11); free(result12); free(result13); free(result21); free(result22); free(result23); return a; } z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) { z z; memcpy(z.ke, keys[e], 16); memcpy(z.ke1, keys[(e + 1) % 3], 16); z.ve = views[e]; z.ve1 = views[(e + 1) % 
3]; memcpy(z.re, rs[e], 4); memcpy(z.re1, rs[(e + 1) % 3], 4); return z; } int GetNextSelected(int size, unsigned char *data, int *dataPtr) { int value = 0; int modulo = size; while (size > 0) { value <<= 8; value += (int)data[*dataPtr]; size >>= 8; (*dataPtr)++; } if (!(value & 0x01)) //will return odd number value++; return (int)value % modulo; } Merkle * BuildMerkleTree(int NumRounds, z * zs) { int i; Merkle *tempNode; Merkle *startNode = NULL; Merkle *childNode; Merkle *prevNode; int done = 0; int odd = 0; unsigned char datablock[64]; if ((!zs) || (NumRounds < 2)) return NULL; prevNode = NULL; for (i = 0; i < NumRounds; i++) { tempNode = malloc(sizeof(Merkle)); if (i == 0) startNode = tempNode; sha256(tempNode->data, (unsigned char *)&(zs[i]), sizeof(z) * 8); tempNode->parent = NULL; tempNode->type = 0; tempNode->next = NULL; tempNode->previous = prevNode; if (prevNode) prevNode->next = tempNode; if (!odd) { tempNode->sibling = NULL; odd = 1; } else { prevNode->sibling = tempNode; tempNode->sibling = prevNode; odd = 0; } prevNode = tempNode; } while (!done) { childNode = startNode; while (childNode->parent) childNode = childNode->parent; if (!childNode->sibling) { done = 1; continue; } odd = 0; prevNode = NULL; while (childNode != NULL) { tempNode = malloc(sizeof(Merkle)); tempNode->type = 1; childNode->parent = tempNode; tempNode->previous = prevNode; if (prevNode) prevNode->next = tempNode; tempNode->next = NULL; tempNode->parent = NULL; if (!odd) { tempNode->sibling = NULL; odd = 1; } else { prevNode->sibling = tempNode; tempNode->sibling = prevNode; odd = 0; } if (childNode->sibling) { childNode->sibling->parent = tempNode; memcpy(datablock, childNode->data, 32); memcpy(&(datablock[32]), childNode->sibling->data, 32); sha256(tempNode->data, datablock, 64 * 8); childNode = childNode->sibling->next; } else { memset(datablock, 0, sizeof(datablock)); memcpy(datablock, childNode->data, 32); sha256(tempNode->data, datablock, 64 * 8); childNode = childNode->sibling; } prevNode = tempNode; } } return startNode; } void DestroyMerkleTree(Merkle * startNode) { Merkle *tempNode; if (startNode->parent) DestroyMerkleTree(startNode->parent); startNode->parent = NULL; while (startNode) { tempNode = startNode->next; free(startNode); startNode = tempNode; } return; } #define NUM_LOOPS 1 int main(int argc, char *argv[]) { setbuf(stdout, NULL); srand((unsigned)time(NULL)); init_EVP(); openmp_thread_setup(); char CHALLENGE[BLOCK_SIZE]; char ek[BLOCK_SIZE]; //eval key is 447 bits // if (argc != 4) { printf("Usage: %s <number of rounds (e.g. 
20, 40, 60, 80, 100)> <challenge (Max %d char)> <eval key (Max %d char)>\n", argv[0], MSG_SIZE, MSG_SIZE); return -1; } NUM_ROUNDS = atoi(argv[1]); if ((NUM_ROUNDS & 0x01) || (NUM_ROUNDS < 4)) { printf("Number of rounds should be even and > 4\n"); return -1; } unsigned char garbage[4]; if (RAND_bytes(garbage, 4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } memset(CHALLENGE, 0, sizeof(CHALLENGE)); strncpy(CHALLENGE, argv[2], MSG_SIZE); //55 is max length as we only support 447 bits = 55.875 bytes memset(ek, 0, sizeof(ek)); strncpy(ek, argv[3], MSG_SIZE); int i = strlen(ek); printf("ek length: %d\n", i); printf("Iterations of ZKBdf: %d\n", NUM_ROUNDS); unsigned char input[BLOCK_SIZE]; //512 bits memset(input, 0, sizeof(input)); memcpy(input, ek, sizeof(input)); struct timeval begin, delta; gettimeofday(&begin, NULL); unsigned char rs[NUM_ROUNDS][3][4]; unsigned char keys[NUM_ROUNDS][3][16]; a as[NUM_ROUNDS]; View localViews[NUM_ROUNDS][3]; int totalCrypto = 0; z *zs; for (int loops = 0; loops < NUM_LOOPS; loops++) { //Generating keys if (RAND_bytes((unsigned char *)keys, NUM_ROUNDS * 3 * 16) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } if (RAND_bytes((unsigned char *)rs, NUM_ROUNDS * 3 * 4) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } //Sharing secrets unsigned char shares[NUM_ROUNDS][3][BLOCK_SIZE]; memset(shares, 0, NUM_ROUNDS * 3 * BLOCK_SIZE); if (RAND_bytes((unsigned char *)shares, NUM_ROUNDS * 3 * BLOCK_SIZE) != 1) { printf("RAND_bytes failed crypto, aborting\n"); return 0; } #pragma omp parallel for for (int k = 0; k < NUM_ROUNDS; k++) { for (int j = 0; j < i; j++) { shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j]; } for (int j = i; j < BLOCK_SIZE; j++) { shares[k][2][j] = shares[k][0][j] = shares[k][1][j] = 0; } } unsigned char *randomness[NUM_ROUNDS][3]; int es[NUM_ROUNDS]; uint32_t finalHash1[8]; uint32_t finalHash2[8]; zs = malloc(sizeof(z) * NUM_ROUNDS); int r; for (r = 0; r < NUM_ROUNDS; r++) { unsigned char plaintext[16]; memset(plaintext, 0x30, sizeof(plaintext)); if (r != 0) { SHA256_CTX ctx; unsigned char prevroundhash[SHA256_DIGEST_LENGTH]; SHA256_Init(&ctx); SHA256_Update(&ctx, &(zs[r - 1]), sizeof(z)); SHA256_Final(prevroundhash, &ctx); memcpy(plaintext, prevroundhash, sizeof(plaintext)); } //Generating randomness // #pragma omp parallel for //for (int k = 0; k < (NUM_ROUNDS); k++) { for (int j = 0; j < 3; j++) { randomness[r][j] = malloc((ySize * 4) * sizeof(unsigned char)); getAllRandomness(keys[r][j], plaintext, randomness[r][j]); } // } //Running MPC - SHA2 // #pragma omp parallel for //for (int k = 0; k < NUM_ROUNDS; k++) { as[r] = commit(i, shares[r], CHALLENGE, randomness[r], rs[r], localViews[r]); for (int j = 0; j < 3; j++) { free(randomness[r][j]); } // } //Committing // #pragma omp parallel for //for (int k = 0; k < (NUM_ROUNDS); k++) { unsigned char hash1[SHA256_DIGEST_LENGTH]; memset(hash1, 0, sizeof(hash1)); H(keys[r][0], localViews[r][0], rs[r][0], hash1); memcpy(as[r].h[0], &hash1, 32); H(keys[r][1], localViews[r][1], rs[r][1], hash1); memcpy(as[r].h[1], &hash1, 32); H(keys[r][2], localViews[r][2], rs[r][2], hash1); memcpy(as[r].h[2], &hash1, 32); // } //Generating E if (r == 0) { for (int j = 0; j < 8; j++) { finalHash1[j] = as[0].yp1[0][j] ^ as[0].yp1[1][j] ^ as[0].yp1[2][j]; finalHash2[j] = as[0].yp2[0][j] ^ as[0].yp2[1][j] ^ as[0].yp2[2][j]; } printf("output H(ek) = "); for (int i = 0; i < 8; i++) { printf("%02X", finalHash1[i]); } printf("\n"); printf("output 
HMAC(ek,Challenge) = "); for (int i = 0; i < 8; i++) { printf("%02X", finalHash2[i]); } printf("\n"); } H3(finalHash1, finalHash2, &(as[r]), /* NUM_ROUNDS */ 1, &(es[r])); //Packing Z // #pragma omp parallel for //for (int i = 0; i < (NUM_ROUNDS); i++) { zs[r] = prove(es[r], keys[r], rs[r], localViews[r]); // } } } //now to extract the PCP proofs int PCProunds = (int)ceil(log(NUM_ROUNDS) / log(2)); int Totalselected = 0; unsigned char PCPselected[NUM_ROUNDS]; Merkle *startNode = NULL; Merkle *currNode = NULL; Merkle *tempNode = NULL; Merkle *rootNode = NULL; unsigned char MerkleHash[64]; unsigned char MerkleBranch[(32 * 2 * PCProunds) + 32]; int MerkleHashPtr; int Nextselected; startNode = BuildMerkleTree(NUM_ROUNDS, zs); rootNode = startNode; while (rootNode->parent) rootNode = rootNode->parent; memset(MerkleHash, 0, sizeof(MerkleHash)); memcpy(&(MerkleHash[32]), rootNode->data, 32); sha256(MerkleHash, MerkleHash, 64 * 8); MerkleHashPtr = 0; memset(PCPselected, 0, sizeof(PCPselected)); while (Totalselected < PCProunds) { Nextselected = GetNextSelected(NUM_ROUNDS, MerkleHash, &MerkleHashPtr); if (!PCPselected[Nextselected]) { PCPselected[Nextselected] = 1; Totalselected++; } if (MerkleHashPtr >= 32) { sha256(MerkleHash, MerkleHash, 64 * 8); MerkleHashPtr = 0; } } gettimeofday(&delta, NULL); unsigned long inMilli = (delta.tv_sec - begin.tv_sec) * 1000000 + (delta.tv_usec - begin.tv_usec); inMilli /= 1000; //Writing ZKBoo proofs to file FILE * file; char outputFile[3 * sizeof(int) + 8]; sprintf(outputFile, "out%i.bin", NUM_ROUNDS); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } fwrite(as, sizeof(a), NUM_ROUNDS, file); fwrite(zs, sizeof(z), NUM_ROUNDS, file); fclose(file); //writing PCP proofs to file sprintf(outputFile, "pcp%i-%i.bin", NUM_ROUNDS, PCProunds); file = fopen(outputFile, "wb"); if (!file) { printf("Unable to open file!"); return 1; } currNode = startNode; fwrite(rootNode->data, 32, 1, file); //write the root node first tempNode = startNode; for (int k = 0; k < NUM_ROUNDS; k++) { fwrite(tempNode->data, 32, 1, file); tempNode = tempNode->next; } for (int j = 0; j < NUM_ROUNDS; j++) { if (PCPselected[j]) { //print current node tempNode = currNode; memset(MerkleBranch, 0, sizeof(MerkleBranch)); MerkleHashPtr = 0; while (tempNode->parent != NULL) //write the current node { if (tempNode->sibling) { if (tempNode->sibling == tempNode->next) { memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->data, 32); MerkleHashPtr += 32; memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->sibling->data, 32); MerkleHashPtr += 32; } else { memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->sibling->data, 32); MerkleHashPtr += 32; memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->data, 32); MerkleHashPtr += 32; } } else { memcpy(&(MerkleBranch[MerkleHashPtr]), tempNode->data, 32); MerkleHashPtr += 64; } tempNode = tempNode->parent; } fwrite(MerkleBranch, MerkleHashPtr, 1, file); fwrite(&(as[j]), sizeof(a), 1, file); fwrite(&(zs[j]), sizeof(z), 1, file); fwrite(&(as[j - 1]), sizeof(a), 1, file); fwrite(&(zs[j - 1]), sizeof(z), 1, file); } currNode = currNode->next; } DestroyMerkleTree(startNode); fclose(file); free(zs); printf("Total time taken for %d loops: %d mili-seconds\n", NUM_LOOPS, inMilli); printf("Time per loop: %d mili-seconds\n", inMilli / NUM_LOOPS); printf("\n"); printf("zkboo Proof output to file %s", outputFile); openmp_thread_cleanup(); cleanup_EVP(); return EXIT_SUCCESS; }
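/* Sanity-check sketch (standalone, hypothetical test program, not in the
 * original): XORing the three output shares of mpc_AND's
 * (2,3)-decomposition must reconstruct x & y - the correlated randomness
 * r[i] cancels pairwise, and the nine cross terms x_i & y_j each appear
 * exactly once across t[0] ^ t[1] ^ t[2]. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    srand(1);
    for (int trial = 0; trial < 1000; trial++) {
        uint32_t x[3], y[3], r[3], t[3];
        for (int i = 0; i < 3; i++) {
            x[i] = (uint32_t)rand();   /* low 31 bits suffice for a smoke test */
            y[i] = (uint32_t)rand();
            r[i] = (uint32_t)rand();
        }
        uint32_t xs = x[0] ^ x[1] ^ x[2], ys = y[0] ^ y[1] ^ y[2];
        for (int i = 0; i < 3; i++) {
            int j = (i + 1) % 3;       /* same share formula as mpc_AND above */
            t[i] = (x[i] & y[j]) ^ (x[j] & y[i]) ^ (x[i] & y[i]) ^ r[i] ^ r[j];
        }
        if ((t[0] ^ t[1] ^ t[2]) != (xs & ys)) {
            puts("mpc_AND reconstruction FAILED");
            return 1;
        }
    }
    puts("mpc_AND reconstruction OK");
    return 0;
}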
image2pdf.c
/** @file image2pdf.c * * Oyranos is an open source Color Management System * * Copyright (C) 2008-2012 Kai-Uwe Behrmann * */ /** * @brief public Oyranos API's * @internal * @author Kai-Uwe Behrmann <ku.b@gmx.de> * @par License: * new BSD <http://www.opensource.org/licenses/BSD-3-Clause> * @since 2008/09/26 * * A sample application on how to compose a set of camera raw files into a pdf, * while keeping attention to user color settings. * Beside Cairo and Oyranos you need DCraw during runtime. * * Compile: cc -pedantic -Wall -g `oyranos-config --cflags` `oyranos-config --ld_x_flags` `pkg-config --cflags --libs cairo` image2pdf.c -o image2pdf */ # include <stddef.h> #define __USE_POSIX2 1 #include <stdio.h> /* popen() */ FILE *popen ( const char *__command, const char *__modes); #include <math.h> #include <string.h> #include <oyConversion_s.h> #include <oyProfile_s.h> /* Oyranos headers */ #include <oyranos_devices.h> #include <cairo.h> /* Cairo headers */ #include <cairo-pdf.h> #define MIN(a,b) ((a<b)?(a):(b)) #define MAX(a,b) ((a>b)?(a):(b)) int main (int argc, char ** argv) { int result = 0; int i,j,o, error; cairo_t * cr = 0; cairo_surface_t * surface = NULL, * image_surf = NULL; cairo_status_t status; double page_w = 210.0, /* page width in mm */ page_h = 297.0, resolution = 72.0, /* Cairo PDF surface resolution */ scale = 1.0, frame = 0; int pixel_w, pixel_h, /* page size in pixel */ x,y,w=0,h=0, /* image dimensions */ to_moni = 0; size_t size = 0; unsigned char * image_data = 0, rgba[4] = {127,127,127,255}; oyProfile_s * monitor, * print, * output; oyConversion_s * to_output = 0; oyConfig_s * device = 0; uint32_t icc_profile_flags = 0; if(argc < 2) { printf("Merge some CamerRAW images into one output image and use\n"); printf("Oyranos CMS settings to obtain the result.\n"); printf("\n"); printf("Usage of the image2pdf example application:"); printf(" To obtain a PDF (test.pdf):\n"); printf(" image2pdf imageA.raw imageB.raw\n"); printf(" To obtain a monitor preview (test.png):\n"); printf(" image2pdf --monitor imageA.raw imageB.raw\n"); return 1; } o = 1; if(strcmp(argv[o],"--monitor") == 0 || strcmp(argv[o],"-m") == 0) { ++o; to_moni = 1; resolution = 96; } pixel_w = page_w / 25.4 * resolution; pixel_h = page_h / 25.4 * resolution; /* create a surface to place our images on */ if(to_moni) surface = cairo_image_surface_create( CAIRO_FORMAT_ARGB32, pixel_w,pixel_h); else surface = cairo_pdf_surface_create( "test.pdf", pixel_w, pixel_h ); status = cairo_surface_status( surface ); if(status) return 1; /* select profiles matching actual capabilities */ icc_profile_flags = oyICCProfileSelectionFlagsFromOptions( OY_CMM_STD, "//" OY_TYPE_STD "/icc_color", NULL, 0 ); /* The monitor profile is located in the Xserver. For details see: * http://www.freedesktop.org/wiki/Specifications/icc_profiles_in_x_spec */ error = oyDeviceGet( OY_TYPE_STD, "monitor", 0, 0, &device ); error = oyDeviceGetProfile( device, 0, &monitor ); if(error > 0) fprintf(stderr, "oyDeviceGetProfile error: %d\n", error); printf("monitor: %s\n", oyProfile_GetText( monitor, oyNAME_DESCRIPTION ) ); /* The output profile is equal to sRGB, as output profiles are curently not * supported in Cairo. 
*/ print = oyProfile_FromStd( oyASSUMED_WEB, icc_profile_flags, 0 ); printf("print: %s\n", oyProfile_GetText( print, oyNAME_DESCRIPTION )); cr = cairo_create( surface ); cairo_set_source_rgba( cr, rgba[0]/255., rgba[1]/255., rgba[2]/255., 1.0 ); cairo_rectangle( cr, 0, 0, pixel_w, pixel_h ); cairo_fill( cr ); for ( i=0; i < argc-o; ++i ) { const char * filename = argv[i+o]; oyOptions_s * options = NULL; oyImage_s * in = NULL, * out = NULL; error = oyImage_FromFile( filename, icc_profile_flags, &in, NULL ); w = oyImage_GetWidth( in ); h = oyImage_GetHeight( in ); if(!(w && h)) { oyImage_Release( &in ); continue; } /* create a Cairo image */ image_surf = cairo_image_surface_create( CAIRO_FORMAT_ARGB32, w, h ); status = cairo_surface_status( image_surf ); if(status) return 1; /* write our dcraw stream on the Cairo image */ image_data = cairo_image_surface_get_data( image_surf ); size = w*h; /* build the color context */ if( to_moni ) { output = oyProfile_Copy( monitor, 0 ); } else { output = oyProfile_Copy( print, 0 ); } out = oyImage_Create( w, h, image_data, oyChannels_m(oyProfile_GetChannelsCount(output)+1) | oyDataType_m(oyUINT8), output, 0 ); /* create a processing graph to convert from one image to an other */ to_output = oyConversion_CreateBasicPixels( in, out, options, 0 ); /* tell Oyranos to apply defaults */ oyConversion_Correct( to_output, "//" OY_TYPE_STD "/icc_color", oyOPTIONATTRIBUTE_ADVANCED, 0 ); /* transform colors */ oyConversion_RunPixels( to_output, NULL ); oyConversion_Release( &to_output ); /*oyImage_WritePPM( out, "test_out.ppm", "out" );*/ oyImage_Release( &in ); oyImage_Release( &out ); /* Cairo uses a Blue Green Red Alpha channel layout */ #pragma omp parallel for for(j = 0; j < (int)size; ++j) { unsigned char t = image_data[j*4+2]; image_data[j*4+2] = image_data[j*4+0]; /*image_data[j*4+1] = 0;*/ image_data[j*4+0] = t; image_data[j*4+3] = 255; } cairo_surface_mark_dirty( image_surf ); /* place our images on a sheet */ if(argc-o > 1) { /* place in contact sheed style */ scale = (pixel_w - pixel_w/10.0)/4.0/(double)MAX(w,h); cairo_save( cr ); x = i%4 * (pixel_w - pixel_w/20.0)/4.0 + pixel_w/30.0; y = i/4 * (pixel_w - pixel_w/20.0)/4.0 + ((pixel_w - pixel_w/20.0)/4.0 - MIN(w,h)*scale)/2.0 + pixel_w/30.0; } else { /* single image */ scale = (pixel_w - pixel_w/10.0)/(double)MAX(w,h); x = pixel_w/20.0; y = ((pixel_w - pixel_w/20.0) - MIN(w,h)*scale)/2.0 + pixel_w/20.0; } /* draw a frame around the image */ frame = pixel_w/20.0 * scale; cairo_set_source_rgba( cr, .0, .0, .0, 1.0); cairo_set_line_width (cr, 1.); cairo_rectangle( cr, x - frame, y - frame, w*scale + 2*frame, h*scale + 2*frame); cairo_stroke(cr); /* draw the image */ cairo_translate( cr, x, y ); cairo_scale( cr, scale, scale ); cairo_set_source_surface( cr, image_surf, 0,0 ); cairo_paint( cr ); cairo_restore( cr ); /* small clean */ cairo_surface_destroy( image_surf ); } if(to_moni) cairo_surface_write_to_png( surface, "test.png" ); cairo_surface_finish( surface ); /* clean */ cairo_surface_destroy( surface ); oyProfile_Release( &monitor ); oyProfile_Release( &print ); return result; }
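/* Worked example of the mm -> pixel conversion used in main() above
 * (pixel = mm / 25.4 * dpi, truncated to int): an A4 page (210 x 297 mm)
 * is 595 x 841 px at the 72-dpi PDF default and 793 x 1122 px at the
 * 96-dpi monitor setting. Illustrative standalone check: */
#include <stdio.h>

int main(void)
{
    double page_w = 210.0, page_h = 297.0;   /* A4 in mm */
    double dpi[2] = {72.0, 96.0};
    for (int i = 0; i < 2; i++)
        printf("%4.0f dpi: %d x %d px\n", dpi[i],
               (int)(page_w / 25.4 * dpi[i]),
               (int)(page_h / 25.4 * dpi[i]));
    return 0;
}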
/** * @brief public Oyranos API's * @internal * @author Kai-Uwe Behrmann <ku.b@gmx.de> * @par License: * new BSD <http://www.opensource.org/licenses/BSD-3-Clause> * @since 2008/09/26 * * A sample application on how to compose a set of camera raw files into a pdf, * while keeping attention to user color settings. * Beside Cairo and Oyranos you need DCraw during runtime. * * Compile: cc -pedantic -Wall -g `oyranos-config --cflags` `oyranos-config --ld_x_flags` `pkg-config --cflags --libs cairo` image2pdf.c -o image2pdf */ #include <stddef.h> #define __USE_POSIX2 1 #include <stdio.h> /* popen() */ FILE *popen(const char *__command, const char *__modes); #include <math.h> #include <string.h> #include <oyConversion_s.h> #include <oyProfile_s.h> /* Oyranos headers */ #include <oyranos_devices.h> #include <cairo.h> /* Cairo headers */ #include <cairo-pdf.h> #define MIN(a,b) ((a<b)?(a):(b)) #define MAX(a,b) ((a>b)?(a):(b)) int main(int argc, char **argv) { int result = 0; int i, j, o, error; cairo_t *cr = 0; cairo_surface_t *surface = NULL, *image_surf = NULL; cairo_status_t status; double page_w = 210.0, /* page width in mm */ page_h = 297.0, resolution = 72.0, /* Cairo PDF surface * resolution */ scale = 1.0, frame = 0; int pixel_w, pixel_h, /* page size in pixel */ x, y, w = 0, h = 0, /* image dimensions */ to_moni = 0; size_t size = 0; unsigned char *image_data = 0, rgba[4] = {127, 127, 127, 255}; oyProfile_s *monitor, *print, *output; oyConversion_s *to_output = 0; oyConfig_s *device = 0; uint32_t icc_profile_flags = 0; if (argc < 2) { printf("Merge some CamerRAW images into one output image and use\n"); printf("Oyranos CMS settings to obtain the result.\n"); printf("\n"); printf("Usage of the image2pdf example application:"); printf(" To obtain a PDF (test.pdf):\n"); printf(" image2pdf imageA.raw imageB.raw\n"); printf(" To obtain a monitor preview (test.png):\n"); printf(" image2pdf --monitor imageA.raw imageB.raw\n"); return 1; } o = 1; if (strcmp(argv[o], "--monitor") == 0 || strcmp(argv[o], "-m") == 0) { ++o; to_moni = 1; resolution = 96; } pixel_w = page_w / 25.4 * resolution; pixel_h = page_h / 25.4 * resolution; /* create a surface to place our images on */ if (to_moni) surface = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, pixel_w, pixel_h); else surface = cairo_pdf_surface_create("test.pdf", pixel_w, pixel_h); status = cairo_surface_status(surface); if (status) return 1; /* select profiles matching actual capabilities */ icc_profile_flags = oyICCProfileSelectionFlagsFromOptions(OY_CMM_STD, "//" OY_TYPE_STD "/icc_color", NULL, 0); /* * The monitor profile is located in the Xserver. For details see: * http://www.freedesktop.org/wiki/Specifications/icc_profiles_in_x_spec */ error = oyDeviceGet(OY_TYPE_STD, "monitor", 0, 0, &device); error = oyDeviceGetProfile(device, 0, &monitor); if (error > 0) fprintf(stderr, "oyDeviceGetProfile error: %d\n", error); printf("monitor: %s\n", oyProfile_GetText(monitor, oyNAME_DESCRIPTION)); /* * The output profile is equal to sRGB, as output profiles are curently * not supported in Cairo. 
*/ print = oyProfile_FromStd(oyASSUMED_WEB, icc_profile_flags, 0); printf("print: %s\n", oyProfile_GetText(print, oyNAME_DESCRIPTION)); cr = cairo_create(surface); cairo_set_source_rgba(cr, rgba[0] / 255., rgba[1] / 255., rgba[2] / 255., 1.0); cairo_rectangle(cr, 0, 0, pixel_w, pixel_h); cairo_fill(cr); for (i = 0; i < argc - o; ++i) { const char *filename = argv[i + o]; oyOptions_s *options = NULL; oyImage_s *in = NULL, *out = NULL; error = oyImage_FromFile(filename, icc_profile_flags, &in, NULL); w = oyImage_GetWidth(in); h = oyImage_GetHeight(in); if (!(w && h)) { oyImage_Release(&in); continue; } /* create a Cairo image */ image_surf = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, w, h); status = cairo_surface_status(image_surf); if (status) return 1; /* write our dcraw stream on the Cairo image */ image_data = cairo_image_surface_get_data(image_surf); size = w * h; /* build the color context */ if (to_moni) { output = oyProfile_Copy(monitor, 0); } else { output = oyProfile_Copy(print, 0); } out = oyImage_Create(w, h, image_data, oyChannels_m(oyProfile_GetChannelsCount(output) + 1) | oyDataType_m(oyUINT8), output, 0); /* create a processing graph to convert from one image to an other */ to_output = oyConversion_CreateBasicPixels(in, out, options, 0); /* tell Oyranos to apply defaults */ oyConversion_Correct(to_output, "//" OY_TYPE_STD "/icc_color", oyOPTIONATTRIBUTE_ADVANCED, 0); /* transform colors */ oyConversion_RunPixels(to_output, NULL); oyConversion_Release(&to_output); /* oyImage_WritePPM( out, "test_out.ppm", "out" ); */ oyImage_Release(&in); oyImage_Release(&out); /* Cairo uses a Blue Green Red Alpha channel layout */ for (j = 0; j < (int)size; ++j) { unsigned char t = image_data[j * 4 + 2]; image_data[j * 4 + 2] = image_data[j * 4 + 0]; /* image_data[j*4+1] = 0; */ image_data[j * 4 + 0] = t; image_data[j * 4 + 3] = 255; } cairo_surface_mark_dirty(image_surf); /* place our images on a sheet */ if (argc - o > 1) { /* place in contact sheed style */ scale = (pixel_w - pixel_w / 10.0) / 4.0 / (double)MAX(w, h); cairo_save(cr); x = i % 4 * (pixel_w - pixel_w / 20.0) / 4.0 + pixel_w / 30.0; y = i / 4 * (pixel_w - pixel_w / 20.0) / 4.0 + ((pixel_w - pixel_w / 20.0) / 4.0 - MIN(w, h) * scale) / 2.0 + pixel_w / 30.0; } else { /* single image */ scale = (pixel_w - pixel_w / 10.0) / (double)MAX(w, h); x = pixel_w / 20.0; y = ((pixel_w - pixel_w / 20.0) - MIN(w, h) * scale) / 2.0 + pixel_w / 20.0; } /* draw a frame around the image */ frame = pixel_w / 20.0 * scale; cairo_set_source_rgba(cr, .0, .0, .0, 1.0); cairo_set_line_width(cr, 1.); cairo_rectangle(cr, x - frame, y - frame, w * scale + 2 * frame, h * scale + 2 * frame); cairo_stroke(cr); /* draw the image */ cairo_translate(cr, x, y); cairo_scale(cr, scale, scale); cairo_set_source_surface(cr, image_surf, 0, 0); cairo_paint(cr); cairo_restore(cr); /* small clean */ cairo_surface_destroy(image_surf); } if (to_moni) cairo_surface_write_to_png(surface, "test.png"); cairo_surface_finish(surface); /* clean */ cairo_surface_destroy(surface); oyProfile_Release(&monitor); oyProfile_Release(&print); return result; }
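/* The BGRA<->RGBA swap loop above has no cross-iteration dependencies
 * (iteration j touches only pixel j), which is why the OpenMP variant of
 * this file can mark it "parallel for" unchanged. A minimal, hypothetical
 * timing harness for the parallel form (GCC/Clang: compile with -fopenmp): */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t size = 4096UL * 4096UL;                   /* pixels */
    unsigned char *image_data = calloc(size * 4, 1); /* BGRA buffer */
    if (!image_data) return 1;
    double t0 = omp_get_wtime();
#pragma omp parallel for
    for (long j = 0; j < (long)size; ++j) {
        unsigned char t = image_data[j * 4 + 2];
        image_data[j * 4 + 2] = image_data[j * 4 + 0];
        image_data[j * 4 + 0] = t;
        image_data[j * 4 + 3] = 255;                 /* force opaque alpha */
    }
    printf("parallel swap: %.3f s\n", omp_get_wtime() - t0);
    free(image_data);
    return 0;
}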
/** * @brief public Oyranos API's * @internal * @author Kai-Uwe Behrmann <ku.b@gmx.de> * @par License: * new BSD <http://www.opensource.org/licenses/BSD-3-Clause> * @since 2008/09/26 * * A sample application on how to compose a set of camera raw files into a pdf, * while keeping attention to user color settings. * Beside Cairo and Oyranos you need DCraw during runtime. * * Compile: cc -pedantic -Wall -g `oyranos-config --cflags` `oyranos-config --ld_x_flags` `pkg-config --cflags --libs cairo` image2pdf.c -o image2pdf */ #include <stddef.h> #define __USE_POSIX2 1 #include <stdio.h> /* popen() */ FILE *popen(const char *__command, const char *__modes); #include <math.h> #include <string.h> #include <oyConversion_s.h> #include <oyProfile_s.h> /* Oyranos headers */ #include <oyranos_devices.h> #include <cairo.h> /* Cairo headers */ #include <cairo-pdf.h> #define MIN(a,b) ((a<b)?(a):(b)) #define MAX(a,b) ((a>b)?(a):(b)) int main(int argc, char **argv) { int result = 0; int i, j, o, error; cairo_t *cr = 0; cairo_surface_t *surface = NULL, *image_surf = NULL; cairo_status_t status; double page_w = 210.0, /* page width in mm */ page_h = 297.0, resolution = 72.0, /* Cairo PDF surface * resolution */ scale = 1.0, frame = 0; int pixel_w, pixel_h, /* page size in pixel */ x, y, w = 0, h = 0, /* image dimensions */ to_moni = 0; size_t size = 0; unsigned char *image_data = 0, rgba[4] = {127, 127, 127, 255}; oyProfile_s *monitor, *print, *output; oyConversion_s *to_output = 0; oyConfig_s *device = 0; uint32_t icc_profile_flags = 0; if (argc < 2) { printf("Merge some CamerRAW images into one output image and use\n"); printf("Oyranos CMS settings to obtain the result.\n"); printf("\n"); printf("Usage of the image2pdf example application:"); printf(" To obtain a PDF (test.pdf):\n"); printf(" image2pdf imageA.raw imageB.raw\n"); printf(" To obtain a monitor preview (test.png):\n"); printf(" image2pdf --monitor imageA.raw imageB.raw\n"); return 1; } o = 1; if (strcmp(argv[o], "--monitor") == 0 || strcmp(argv[o], "-m") == 0) { ++o; to_moni = 1; resolution = 96; } pixel_w = page_w / 25.4 * resolution; pixel_h = page_h / 25.4 * resolution; /* create a surface to place our images on */ if (to_moni) surface = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, pixel_w, pixel_h); else surface = cairo_pdf_surface_create("test.pdf", pixel_w, pixel_h); status = cairo_surface_status(surface); if (status) return 1; /* select profiles matching actual capabilities */ icc_profile_flags = oyICCProfileSelectionFlagsFromOptions(OY_CMM_STD, "//" OY_TYPE_STD "/icc_color", NULL, 0); /* * The monitor profile is located in the Xserver. For details see: * http://www.freedesktop.org/wiki/Specifications/icc_profiles_in_x_spec */ error = oyDeviceGet(OY_TYPE_STD, "monitor", 0, 0, &device); error = oyDeviceGetProfile(device, 0, &monitor); if (error > 0) fprintf(stderr, "oyDeviceGetProfile error: %d\n", error); printf("monitor: %s\n", oyProfile_GetText(monitor, oyNAME_DESCRIPTION)); /* * The output profile is equal to sRGB, as output profiles are curently * not supported in Cairo. 
*/ print = oyProfile_FromStd(oyASSUMED_WEB, icc_profile_flags, 0); printf("print: %s\n", oyProfile_GetText(print, oyNAME_DESCRIPTION)); cr = cairo_create(surface); cairo_set_source_rgba(cr, rgba[0] / 255., rgba[1] / 255., rgba[2] / 255., 1.0); cairo_rectangle(cr, 0, 0, pixel_w, pixel_h); cairo_fill(cr); for (i = 0; i < argc - o; ++i) { const char *filename = argv[i + o]; oyOptions_s *options = NULL; oyImage_s *in = NULL, *out = NULL; error = oyImage_FromFile(filename, icc_profile_flags, &in, NULL); w = oyImage_GetWidth(in); h = oyImage_GetHeight(in); if (!(w && h)) { oyImage_Release(&in); continue; } /* create a Cairo image */ image_surf = cairo_image_surface_create(CAIRO_FORMAT_ARGB32, w, h); status = cairo_surface_status(image_surf); if (status) return 1; /* write our dcraw stream on the Cairo image */ image_data = cairo_image_surface_get_data(image_surf); size = w * h; /* build the color context */ if (to_moni) { output = oyProfile_Copy(monitor, 0); } else { output = oyProfile_Copy(print, 0); } out = oyImage_Create(w, h, image_data, oyChannels_m(oyProfile_GetChannelsCount(output) + 1) | oyDataType_m(oyUINT8), output, 0); /* create a processing graph to convert from one image to an other */ to_output = oyConversion_CreateBasicPixels(in, out, options, 0); /* tell Oyranos to apply defaults */ oyConversion_Correct(to_output, "//" OY_TYPE_STD "/icc_color", oyOPTIONATTRIBUTE_ADVANCED, 0); /* transform colors */ oyConversion_RunPixels(to_output, NULL); oyConversion_Release(&to_output); /* oyImage_WritePPM( out, "test_out.ppm", "out" ); */ oyImage_Release(&in); oyImage_Release(&out); /* Cairo uses a Blue Green Red Alpha channel layout */ #pragma omp parallel for for (j = 0; j < (int)size; ++j) { unsigned char t = image_data[j * 4 + 2]; image_data[j * 4 + 2] = image_data[j * 4 + 0]; /* image_data[j*4+1] = 0; */ image_data[j * 4 + 0] = t; image_data[j * 4 + 3] = 255; } cairo_surface_mark_dirty(image_surf); /* place our images on a sheet */ if (argc - o > 1) { /* place in contact sheed style */ scale = (pixel_w - pixel_w / 10.0) / 4.0 / (double)MAX(w, h); cairo_save(cr); x = i % 4 * (pixel_w - pixel_w / 20.0) / 4.0 + pixel_w / 30.0; y = i / 4 * (pixel_w - pixel_w / 20.0) / 4.0 + ((pixel_w - pixel_w / 20.0) / 4.0 - MIN(w, h) * scale) / 2.0 + pixel_w / 30.0; } else { /* single image */ scale = (pixel_w - pixel_w / 10.0) / (double)MAX(w, h); x = pixel_w / 20.0; y = ((pixel_w - pixel_w / 20.0) - MIN(w, h) * scale) / 2.0 + pixel_w / 20.0; } /* draw a frame around the image */ frame = pixel_w / 20.0 * scale; cairo_set_source_rgba(cr, .0, .0, .0, 1.0); cairo_set_line_width(cr, 1.); cairo_rectangle(cr, x - frame, y - frame, w * scale + 2 * frame, h * scale + 2 * frame); cairo_stroke(cr); /* draw the image */ cairo_translate(cr, x, y); cairo_scale(cr, scale, scale); cairo_set_source_surface(cr, image_surf, 0, 0); cairo_paint(cr); cairo_restore(cr); /* small clean */ cairo_surface_destroy(image_surf); } if (to_moni) cairo_surface_write_to_png(surface, "test.png"); cairo_surface_finish(surface); /* clean */ cairo_surface_destroy(surface); oyProfile_Release(&monitor); oyProfile_Release(&print); return result; }
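/* Note on direct pixel access: Cairo's documented pattern is to call
 * cairo_surface_flush() before writing to the buffer returned by
 * cairo_image_surface_get_data() and cairo_surface_mark_dirty() afterwards
 * (the code above performs the mark_dirty half). A minimal sketch of the
 * full pattern; the helper name is hypothetical: */
#include <cairo.h>

static void fill_opaque_gray(cairo_surface_t *s)
{
    cairo_surface_flush(s);                 /* finish pending drawing first */
    unsigned char *p = cairo_image_surface_get_data(s);
    int n = cairo_image_surface_get_height(s) *
            cairo_image_surface_get_stride(s);
    for (int i = 0; i < n; i += 4) {        /* CAIRO_FORMAT_ARGB32: 4 B/px */
        p[i + 0] = p[i + 1] = p[i + 2] = 127;
        p[i + 3] = 255;
    }
    cairo_surface_mark_dirty(s);            /* tell Cairo the pixels changed */
}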
dp_parallelized_hard_knapsack_.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <stdbool.h>
#include <string.h>
#include <windows.h>
#include <math.h>
#include <dos.h>
#include <dir.h>

#define SIZE 100000
/* #define binsize 50 */

char mssg[SIZE];

/* The only condition for knapsack encryption is that the message length
 * must be a multiple of the number of knapsack elements. */

/* Function for finding length */
int length(char *p)
{
    int j = 0, len = 0;
    while (p[j] != '\0') {
        len += 1;
        j++;
    }
    return len;
}

/* Function for Message Encryption using Public Key */
int *Encryption(int n, char *message, int *public_key, int *encrypted, int mssg_length)
{
    int pos = 0, sum, i, j;
    for (i = 0; i < (int)mssg_length / n; i += 1) {
        sum = 0;
        j = i * n;
        while (j < (i + 1) * n && message[j] != '\0') {
            if (message[j] == '1')
                sum += public_key[j % n];
            j++;
        }
        encrypted[pos] = sum;
        pos++;
    }
    return encrypted;
}

/* Function for finding Multiplicative Inverse during Decryption Process */
int find_multiplicative_inverse(int x, int y)
{
    int i;
    for (i = 1; i < y; i++) {
        if (((i * x) % y) == 1)
            return i;
    }
    return -1; /* no inverse exists: x and y are not coprime */
}

void subsetSum(int arr[], int n, int k)
{
    int i, j;
    int elem_pos[n];
    for (i = 0; i < n; i++)
        elem_pos[i] = 0;
    int m = 1 << n; /* 2^n candidate subsets */
    /* brute-force scan; each candidate i is independent, so the loop
     * parallelizes, and only the unique matching subset writes elem_pos */
#pragma omp parallel for private(j)
    for (i = m - 1; i > 0; i--) {
        int sum = 0, b = i;
        for (j = 0; j < n; j++) {
            sum += (b % 2) * arr[j];
            b = b / 2;
        }
        if (sum == k) {
            int b2 = i;
            for (j = 0; j < n; j++) {
                if (b2 % 2)
                    elem_pos[j] = 1;
                b2 = b2 / 2;
            }
        }
    }
    int original_mssg = 0;
    for (j = 0; j < n; j++)
        original_mssg = original_mssg + pow(2, j) * elem_pos[n - 1 - j];
    printf("%c", (char)original_mssg);
}

/* Function for Decryption of Message using Private Key */
void Decryption(int *encrypt_pointer, int W, int m, int encrypt_items, int *private_key, int n)
{
    int n_inverse = find_multiplicative_inverse(W, m);
    printf("\n");
    int i = 0, decrypt;
    while (i < encrypt_items) {
        decrypt = (encrypt_pointer[i] * n_inverse) % m;
        subsetSum(private_key, n, decrypt);
        i++;
    }
}

/* Function for generating Private Key */
int *generate_private_key(int *arr)
{
    return arr;
}

/* Function for generating Public Key */
int *generate_public_key(int *arr, int *public_array, int n, int m, int C)
{
    int i;
#pragma omp parallel for private(i)
    for (i = 0; i < n; i++)
        public_array[i] = (arr[i] * C) % m;
    return public_array;
}

/* Function for checking whether the given sequence is superincreasing
 * (this condition is important for the knapsack scheme) */
bool check_superincreasing(int *arr, int n)
{
    int i, j, sum;
    for (i = 1; i < n; i++) {
        sum = 0;
#pragma omp parallel for reduction(+:sum)
        for (j = 0; j < i; j++)
            sum += arr[j];
        if (arr[i] <= sum) /* each element must exceed the sum of all before it */
            return false;
    }
    return true;
}

/* Function for checking Prime */
bool isPrime(int x)
{
    int p, flag = 0;
    /* OpenMP does not allow break inside a parallel for, so every candidate
     * divisor is tested and the per-thread results are OR-reduced */
#pragma omp parallel for reduction(||:flag)
    for (p = 2; p <= x / 2; ++p) {
        if (x % p == 0) /* condition for non-prime */
            flag = 1;
    }
    return flag == 0;
}

/* Function for checking if Multiplier is compatible with modulus */
bool check_multiplier(int m, int C)
{
    if (m > C && m % C != 0 && isPrime(m))
        return true;
    return false;
}

/* Function for Generating modulus */
int generate_modulus(int *arr, int n, int W)
{
    int i, arr_sum = 0;
#pragma omp parallel for reduction(+:arr_sum)
    for (i = 0; i < n; i++)
        arr_sum += arr[i];
    i = arr_sum + 1;
    while (1) {
        if (check_multiplier(i, W))
            return i;
        i++;
    }
}

/* Function for displaying Public Key */
void displaypublickey(int *public_key, int n)
{
    int j;
    printf("\n\nPublic Key is : ");
    for (j = 0; j < n; j++)
        printf("%d ", public_key[j]);
}

/* Function for displaying Private Key */
void displayprivatekey(int *private_key, int n)
{
    int j;
    printf("\n\nPrivate Key is : ");
    for (j = 0; j < n; j++)
        printf("%d ", private_key[j]);
}

/* Set color of console text */
void SetColor(int ForgC)
{
    WORD wColor;
    HANDLE hStdOut = GetStdHandle(STD_OUTPUT_HANDLE);
    CONSOLE_SCREEN_BUFFER_INFO csbi; /* csbi provides the wAttributes word */
    if (GetConsoleScreenBufferInfo(hStdOut, &csbi)) {
        /* mask out all but the background attribute, add the foreground color */
        wColor = (csbi.wAttributes & 0xF0) + (ForgC & 0x0F);
        SetConsoleTextAttribute(hStdOut, wColor);
    }
}

/* Convert the ASCII message to its binary representation, MSB first */
void stringToBinary(char *s)
{
    if (s == NULL)
        return; /* no input string */
    size_t len = strlen(s);
    size_t i;
    int j;
    mssg[0] = '\0';
    for (i = 0; i < len; ++i) {
        char ch = s[i];
        for (j = 7; j >= 0; --j)
            strcat(mssg, (ch & (1 << j)) ? "1" : "0");
    }
}

int main(void)
{
    int n, W, i;
    SetColor(15);
    printf("\t\t\t****PARALLEL 0-1 HARD KNAPSACK FOR PUBLIC KEY ENCRYPTION (Parallel-Time)****");
    printf("\n\nEnter number of elements in Knapsack : ");
    scanf("%d", &n);
    int knapsack[n];
    printf("\nEnter knapsack elements : ");
    for (i = 0; i < n; i++)
        scanf("%d", &knapsack[i]);
    printf("\nEnter the total weight of the knapsack : ");
    scanf("%d", &W);
    char str[SIZE];
    /* optionally verify the key first:
       if (!check_superincreasing(knapsack, n)) { printf("Not a Superincreasing sequence"); return 1; } */
    int *private_key = generate_private_key(knapsack);
    /* displayprivatekey(private_key, n); */
    int m = generate_modulus(knapsack, n, W);
    int public_array[n];
    int *public_key = generate_public_key(knapsack, public_array, n, m, W);
    /* displaypublickey(public_key, n); */
    printf("\n\nEnter message to be encoded : ");
    scanf("%s", str); /* gets/fgets proved unreliable here, so scanf is used */
    stringToBinary(str);
    int mssg_length = length(mssg);
    int encrypted[(mssg_length / n) + 1];

    SetColor(14);
    double start = omp_get_wtime();
    printf("\nEncryption process started");
    int *encrypt_p = Encryption(n, mssg, public_key, encrypted, mssg_length);
    printf("\nEncryption Successful!");

    SetColor(2);
    printf("\n\nDecryption process started");
    printf("\n\nDecrypted Message is : ");
    Decryption(encrypt_p, W, m, (mssg_length / n), private_key, n);
    printf("\nDecryption Successful!");

    double end = omp_get_wtime();
    double total = end - start;
    printf("\nTotal Time for Parallel Knapsack is : %lf", total);
    SetColor(15);
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <stdbool.h>
#include <string.h>
#include <windows.h>
#include <math.h>

#define SIZE 100000
// #define binsize 50
char mssg[SIZE];
//**The only condition for knapsack encryption is that the message length must be a multiple of the number of knapsack elements.**//

//Function for finding length
int length(char *p)
{
    int j = 0, len = 0;
    while (p[j] != '\0') {
        len += 1;
        j++;
    }
    return len;
}

//Function for Message Encryption using Public Key
int *Encryption(int n, char *message, int *public_key, int *encrypted, int mssg_length)
{
    //printf("\nMessage length : %d", mssg_length);
    int pos = 0, sum, i, j;
    for (i = 0; i < mssg_length / n; i += 1) {
        sum = 0;
        j = i * n;
        while (j < (i + 1) * n && message[j] != '\0') {
            if (message[j] == '1') {
                sum += public_key[j % n];
            }
            j++;
        }
        encrypted[pos] = sum;
        pos++;
    }
    //printf("\n\nEncrypted message is : ");
    //for (i = 0; i < pos; i++) printf("%d ", encrypted[i]);
    return encrypted;
}

//Function for finding Multiplicative Inverse during Decryption Process
int find_multiplicative_inverse(int x, int y)
{
    int i;
    for (i = 1; i < y; i++) {
        if (((i * x) % y) == 1)
            return i;
    }
    return -1; //no inverse exists; cannot happen when gcd(x,y) == 1
}

void subsetSum(int arr[], int n, int k)
{
    int i, j;
    int elem_pos[n];
    for (i = 0; i < n; i++)
        elem_pos[i] = 0;
    int m = 1 << n; //2^n candidate subsets; integer shift avoids pow() rounding error
    for (i = m - 1; i > 0; i--) {
        int sum = 0, b = i;
        for (j = 0; j < n; j++) {
            sum += (b % 2) * arr[j];
            b = b / 2;
        }
        if (sum == k) {
            int b = i;
            for (j = 0; j < n; j++) {
                if (b % 2)
                    elem_pos[j] = 1;
                b = b / 2;
            }
        }
    }
    int original_mssg = 0;
    for (j = 0; j < n; j++) {
        original_mssg = original_mssg + (1 << j) * elem_pos[n - 1 - j];
    }
    printf("%c", (char)original_mssg);
}

//Function for Decryption of Message using Private Key
void Decryption(int *encrypt_pointer, int W, int m, int encrypt_items, int *private_key, int n)
{
    int n_inverse = find_multiplicative_inverse(W, m);
    //printf("\nModulo inverse is : %d , encrypt_items is : %d", n_inverse, encrypt_items);
    printf("\n");
    int i = 0, decrypt;
    while (i < encrypt_items) {
        decrypt = (encrypt_pointer[i] * n_inverse) % m;
        subsetSum(private_key, n, decrypt);
        i++;
    }
}

//Function for generating Private Key
int *generate_private_key(int *arr)
{
    return arr;
}

//Function for generating Public Key
int *generate_public_key(int *arr, int *public_array, int n, int m, int C)
{
    int i;
    for (i = 0; i < n; i++) {
        public_array[i] = (arr[i] * C) % m;
    }
    return public_array;
}

//Function for checking if given Sequence is Superincreasing or not (***This condition is important for knapsack***)
bool check_superincreasing(int *arr, int n)
{
    int i, j, sum;
    for (i = 1; i < n; i++) {
        sum = 0;
        for (j = 0; j < i; j++) {
            sum += arr[j];
        }
        if (arr[i] < sum) {
            return false;
        }
    }
    return true;
}

//Function for checking Prime
bool isPrime(int x)
{
    int p, flag = 0;
    for (p = 2; p <= x / 2; ++p) {
        //condition for non-prime
        if (x % p == 0) {
            flag = 1;
            break;
        }
    }
    return flag == 0;
}

//Function for checking if Multiplier is compatible with modulus
bool check_multiplier(int m, int C)
{
    if (m > C && m % C != 0 && isPrime(m))
        return true;
    return false;
}

//Function for Generating modulus
int generate_modulus(int *arr, int n, int W)
{
    int i, arr_sum = 0;
    for (i = 0; i < n; i++) {
        arr_sum += arr[i];
    }
    i = arr_sum + 1;
    while (1) {
        if (check_multiplier(i, W)) {
            return i;
        }
        i++;
    }
}

//Function for displaying Public Key
void displaypublickey(int *public_key, int n)
{
    int j;
    printf("\n\nPublic Key is : ");
    for (j = 0; j < n; j++) {
        printf("%d ", public_key[j]);
    }
}

//Function for displaying Private Key
void displayprivatekey(int *private_key, int n)
{
    int j;
    printf("\n\nPrivate Key is : ");
    for (j = 0; j < n; j++) {
        printf("%d ", private_key[j]);
    }
}

//Set color of text
void SetColor(int ForgC)
{
    WORD wColor;
    HANDLE hStdOut = GetStdHandle(STD_OUTPUT_HANDLE);
    CONSOLE_SCREEN_BUFFER_INFO csbi; //We use csbi for the wAttributes word.
    if (GetConsoleScreenBufferInfo(hStdOut, &csbi)) {
        //Mask out all but the background attribute, and add in the foreground color
        wColor = (csbi.wAttributes & 0xF0) + (ForgC & 0x0F);
        SetConsoleTextAttribute(hStdOut, wColor);
    }
}

//included this change1
void stringToBinary(char *s)
{
    if (s == NULL)
        return; /* no input string */
    size_t len = strlen(s);
    size_t i;
    int j;
    mssg[0] = '\0';
    for (i = 0; i < len; ++i) {
        char ch = s[i];
        //each char becomes 8 bits, most significant bit first
        for (j = 7; j >= 0; --j) {
            if (ch & (1 << j)) {
                strcat(mssg, "1");
            } else {
                strcat(mssg, "0");
            }
        }
    }
}

int main(void)
{
    int n;
    int W;
    int i;
    SetColor(15);
    printf("\t\t\t****PARALLEL 0-1 HARD KNAPSACK FOR PUBLIC KEY ENCRYPTION (Parallel-Time)****");
    printf("\n\nEnter number of elements in Knapsack : ");
    scanf("%d", &n);
    int knapsack[n];
    printf("\nEnter knapsack elements : ");
    for (i = 0; i < n; i++) {
        scanf("%d", &knapsack[i]);
    }
    printf("\nEnter the total weight of the knapsack : ");
    scanf("%d", &W);
    char str[SIZE];
    //if (check_superincreasing(knapsack, n)) {
    int *private_key = generate_private_key(knapsack);
    //displayprivatekey(private_key, n);
    int m = generate_modulus(knapsack, n, W);
    //printf("\n\nModulus is : %d", m);
    int public_array[n];
    int *public_key = generate_public_key(knapsack, public_array, n, m, W);
    //displaypublickey(public_key, n);
    printf("\n\nEnter message to be encoded : ");
    scanf("%s", str); //%s stops at whitespace; fgets would first need the newline left by the previous scanf to be consumed
    stringToBinary(str);
    //printf("Binary form of the given string is : %s", mssg);
    int mssg_length = length(mssg);
    int encrypted[(mssg_length / n) + 1];
    SetColor(14);
    double start = omp_get_wtime();
    printf("\nEncryption process started");
    //for (i = 0; i < 5; i++) { printf("."); sleep(3); }
    int *encrypt_p = Encryption(n, mssg, public_key, encrypted, mssg_length);
    printf("\nEncryption Successful!");
    SetColor(2);
    printf("\n\nDecryption process started");
    //for (i = 0; i < 5; i++) { printf("."); sleep(3); }
    printf("\n\nDecrypted Message is : ");
    Decryption(encrypt_p, W, m, (mssg_length / n), private_key, n);
    printf("\nDecryption Successful!");
    double end = omp_get_wtime();
    double total = end - start;
    printf("\nTotal Time for Parallel Knapsack is : %lf", total);
    SetColor(15);
    //printf("Superincreasing sequence");
    //} else {
    //    printf("Not a Superincreasing sequence");
    //}
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <stdbool.h>
#include <string.h>
#include <windows.h>
#include <math.h>

#define SIZE 100000
// #define binsize 50
char mssg[SIZE];
//**The only condition for knapsack encryption is that the message length must be a multiple of the number of knapsack elements.**//

//Function for finding length
int length(char *p)
{
    int j = 0, len = 0;
    while (p[j] != '\0') {
        len += 1;
        j++;
    }
    return len;
}

//Function for Message Encryption using Public Key
int *Encryption(int n, char *message, int *public_key, int *encrypted, int mssg_length)
{
    //printf("\nMessage length : %d", mssg_length);
    int pos = 0, sum, i, j;
    for (i = 0; i < mssg_length / n; i += 1) {
        sum = 0;
        j = i * n;
        while (j < (i + 1) * n && message[j] != '\0') {
            if (message[j] == '1') {
                sum += public_key[j % n];
            }
            j++;
        }
        encrypted[pos] = sum;
        pos++;
    }
    //printf("\n\nEncrypted message is : ");
    //for (i = 0; i < pos; i++) printf("%d ", encrypted[i]);
    return encrypted;
}

//Function for finding Multiplicative Inverse during Decryption Process
int find_multiplicative_inverse(int x, int y)
{
    int i;
    for (i = 1; i < y; i++) {
        if (((i * x) % y) == 1)
            return i;
    }
    return -1; //no inverse exists; cannot happen when gcd(x,y) == 1
}

void subsetSum(int arr[], int n, int k)
{
    int i, j;
    int elem_pos[n];
    for (i = 0; i < n; i++)
        elem_pos[i] = 0;
    int m = 1 << n; //2^n candidate subsets; integer shift avoids pow() rounding error
    //a valid ciphertext block matches exactly one subset, so writes to elem_pos do not conflict
    #pragma omp parallel for private(j)
    for (i = m - 1; i > 0; i--) {
        int sum = 0, b = i;
        for (j = 0; j < n; j++) {
            sum += (b % 2) * arr[j];
            b = b / 2;
        }
        if (sum == k) {
            int b = i;
            for (j = 0; j < n; j++) {
                if (b % 2)
                    elem_pos[j] = 1;
                b = b / 2;
            }
        }
    }
    int original_mssg = 0;
    for (j = 0; j < n; j++) {
        original_mssg = original_mssg + (1 << j) * elem_pos[n - 1 - j];
    }
    printf("%c", (char)original_mssg);
}

//Function for Decryption of Message using Private Key
void Decryption(int *encrypt_pointer, int W, int m, int encrypt_items, int *private_key, int n)
{
    int n_inverse = find_multiplicative_inverse(W, m);
    //printf("\nModulo inverse is : %d , encrypt_items is : %d", n_inverse, encrypt_items);
    printf("\n");
    int i = 0, decrypt;
    while (i < encrypt_items) {
        decrypt = (encrypt_pointer[i] * n_inverse) % m;
        subsetSum(private_key, n, decrypt);
        i++;
    }
}

//Function for generating Private Key
int *generate_private_key(int *arr)
{
    return arr;
}

//Function for generating Public Key
int *generate_public_key(int *arr, int *public_array, int n, int m, int C)
{
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < n; i++) {
        public_array[i] = (arr[i] * C) % m;
    }
    return public_array;
}

//Function for checking if given Sequence is Superincreasing or not (***This condition is important for knapsack***)
bool check_superincreasing(int *arr, int n)
{
    int i, j, sum;
    for (i = 1; i < n; i++) {
        sum = 0;
        #pragma omp parallel for reduction(+ : sum)
        for (j = 0; j < i; j++) {
            sum += arr[j];
        }
        if (arr[i] < sum) {
            return false;
        }
    }
    return true;
}

//Function for checking Prime
bool isPrime(int x)
{
    int p, flag = 0;
    //break is not allowed inside a worksharing loop, so the whole range is scanned
    #pragma omp parallel for reduction(| : flag)
    for (p = 2; p <= x / 2; ++p) {
        //condition for non-prime
        if (x % p == 0) {
            flag = 1;
        }
    }
    return flag == 0;
}

//Function for checking if Multiplier is compatible with modulus
bool check_multiplier(int m, int C)
{
    if (m > C && m % C != 0 && isPrime(m))
        return true;
    return false;
}

//Function for Generating modulus
int generate_modulus(int *arr, int n, int W)
{
    int i, arr_sum = 0;
    #pragma omp parallel for reduction(+:arr_sum)
    for (i = 0; i < n; i++) {
        arr_sum += arr[i];
    }
    i = arr_sum + 1;
    while (1) {
        if (check_multiplier(i, W)) {
            return i;
        }
        i++;
    }
}

//Function for displaying Public Key
void displaypublickey(int *public_key, int n)
{
    int j;
    printf("\n\nPublic Key is : ");
    for (j = 0; j < n; j++) {
        printf("%d ", public_key[j]);
    }
}

//Function for displaying Private Key
void displayprivatekey(int *private_key, int n)
{
    int j;
    printf("\n\nPrivate Key is : ");
    for (j = 0; j < n; j++) {
        printf("%d ", private_key[j]);
    }
}

//Set color of text
void SetColor(int ForgC)
{
    WORD wColor;
    HANDLE hStdOut = GetStdHandle(STD_OUTPUT_HANDLE);
    CONSOLE_SCREEN_BUFFER_INFO csbi; //We use csbi for the wAttributes word.
    if (GetConsoleScreenBufferInfo(hStdOut, &csbi)) {
        //Mask out all but the background attribute, and add in the foreground color
        wColor = (csbi.wAttributes & 0xF0) + (ForgC & 0x0F);
        SetConsoleTextAttribute(hStdOut, wColor);
    }
}

//included this change1
void stringToBinary(char *s)
{
    if (s == NULL)
        return; /* no input string */
    size_t len = strlen(s);
    size_t i;
    int j;
    mssg[0] = '\0';
    for (i = 0; i < len; ++i) {
        char ch = s[i];
        //each char becomes 8 bits, most significant bit first
        for (j = 7; j >= 0; --j) {
            if (ch & (1 << j)) {
                strcat(mssg, "1");
            } else {
                strcat(mssg, "0");
            }
        }
    }
}

int main(void)
{
    int n;
    int W;
    int i;
    SetColor(15);
    printf("\t\t\t****PARALLEL 0-1 HARD KNAPSACK FOR PUBLIC KEY ENCRYPTION (Parallel-Time)****");
    printf("\n\nEnter number of elements in Knapsack : ");
    scanf("%d", &n);
    int knapsack[n];
    printf("\nEnter knapsack elements : ");
    for (i = 0; i < n; i++) {
        scanf("%d", &knapsack[i]);
    }
    printf("\nEnter the total weight of the knapsack : ");
    scanf("%d", &W);
    char str[SIZE];
    //if (check_superincreasing(knapsack, n)) {
    int *private_key = generate_private_key(knapsack);
    //displayprivatekey(private_key, n);
    int m = generate_modulus(knapsack, n, W);
    //printf("\n\nModulus is : %d", m);
    int public_array[n];
    int *public_key = generate_public_key(knapsack, public_array, n, m, W);
    //displaypublickey(public_key, n);
    printf("\n\nEnter message to be encoded : ");
    scanf("%s", str); //%s stops at whitespace; fgets would first need the newline left by the previous scanf to be consumed
    stringToBinary(str);
    //printf("Binary form of the given string is : %s", mssg);
    int mssg_length = length(mssg);
    int encrypted[(mssg_length / n) + 1];
    SetColor(14);
    double start = omp_get_wtime();
    printf("\nEncryption process started");
    //for (i = 0; i < 5; i++) { printf("."); sleep(3); }
    int *encrypt_p = Encryption(n, mssg, public_key, encrypted, mssg_length);
    printf("\nEncryption Successful!");
    SetColor(2);
    printf("\n\nDecryption process started");
    //for (i = 0; i < 5; i++) { printf("."); sleep(3); }
    printf("\n\nDecrypted Message is : ");
    Decryption(encrypt_p, W, m, (mssg_length / n), private_key, n);
    printf("\nDecryption Successful!");
    double end = omp_get_wtime();
    double total = end - start;
    printf("\nTotal Time for Parallel Knapsack is : %lf", total);
    SetColor(15);
    //printf("Superincreasing sequence");
    //} else {
    //    printf("Not a Superincreasing sequence");
    //}
    return 0;
}
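A note on the decryption above: subsetSum() enumerates all 2^n bit patterns for every ciphertext block, which is why it is the loop worth parallelizing, but a Merkle-Hellman private key is superincreasing, and a superincreasing subset-sum can be solved greedily in O(n) with no search at all. The sketch below is illustrative only; greedy_subset_sum is a hypothetical helper, not part of the program above.

#include <stdbool.h>

/* Greedy subset-sum for a superincreasing sequence key[0..n-1] stored in
 * increasing order: scanning from the largest element down, an element is in
 * the subset exactly when it still fits into the remaining target. */
bool greedy_subset_sum(const int *key, int n, int target, int *bits)
{
    for (int j = n - 1; j >= 0; j--) {
        if (key[j] <= target) {
            bits[j] = 1;
            target -= key[j];
        } else {
            bits[j] = 0;
        }
    }
    return target == 0; /* true iff the target decomposed exactly */
}

Swapping this in for the exhaustive search would make each block of Decryption() linear in n, at which point the OpenMP pragma on the search loop becomes unnecessary.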
GB_binop__isge_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16) // A*D function (colscale): GB (_AxD__isge_uint16) // D*A function (rowscale): GB (_DxB__isge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16) // C=scalar+B GB (_bind1st__isge_uint16) // C=scalar+B' GB (_bind1st_tran__isge_uint16) // C=A+scalar GB (_bind2nd__isge_uint16) // C=A'+scalar GB (_bind2nd_tran__isge_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16) // A*D function (colscale): GB (_AxD__isge_uint16) // D*A function (rowscale): GB (_DxB__isge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16) // C=scalar+B GB (_bind1st__isge_uint16) // C=scalar+B' GB (_bind1st_tran__isge_uint16) // C=A+scalar GB (_bind2nd__isge_uint16) // C=A'+scalar GB (_bind2nd_tran__isge_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16) // A*D function (colscale): GB (_AxD__isge_uint16) // D*A function (rowscale): GB (_DxB__isge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16) // C=scalar+B GB (_bind1st__isge_uint16) // C=scalar+B' GB (_bind1st_tran__isge_uint16) // C=A+scalar GB (_bind2nd__isge_uint16) // C=A'+scalar GB (_bind2nd_tran__isge_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
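In the generated file above, the only loops with explicit OpenMP pragmas are the bind1st/bind2nd kernels; every other function defers to an included template. The standalone sketch below mirrors the bind2nd pattern with the GBB/GBX macros replaced by a plain bitmap test and array read. It is illustrative code, not SuiteSparse's API, and bind2nd_isge_u16 is an assumed name.

#include <stdint.h>

/* Apply z = (aij >= y) across a bitmap-masked value array in parallel.
 * Ab[p] == 0 marks an absent entry; Ab == NULL means all entries present. */
void bind2nd_isge_u16(uint16_t *Cx, const uint16_t *Ax, const int8_t *Ab,
                      int64_t anz, uint16_t y, int nthreads)
{
    int64_t p;
    /* iterations are independent, so a static schedule splits them evenly */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        if (Ab != NULL && !Ab[p]) continue; /* entry not present in bitmap */
        Cx[p] = (Ax[p] >= y) ? 1 : 0;
    }
}

The static schedule matches the generated code's choice: the per-entry work is uniform, so even chunking avoids the bookkeeping cost of a dynamic schedule.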
omp_orphan.c
/******************************************************************************
* FILE: omp_orphan.c
* DESCRIPTION:
*   OpenMP Example - Parallel region with an orphaned directive - C/C++ Version
*   This example demonstrates a dot product being performed by an orphaned
*   loop reduction construct. Scoping of the reduction variable is critical.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100

float a[VECLEN], b[VECLEN], sum;

float dotprod ()
{
    int i,tid;

    tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
    for (i=0; i < VECLEN; i++)
    {
        sum = sum + (a[i]*b[i]);
        printf(" tid= %d i=%d\n",tid,i);
    }
    return(sum);
}

int main (int argc, char *argv[])
{
    int i;

    for (i=0; i < VECLEN; i++)
        a[i] = b[i] = 1.0 * i;
    sum = 0.0;

#pragma omp parallel
    sum = dotprod();

    printf("Sum = %f\n",sum);
    return 0;
}
/******************************************************************************
 * FILE: omp_orphan.c
 * DESCRIPTION:
 *   OpenMP Example - Parallel region with an orphaned directive - C/C++ Version
 *   This example demonstrates a dot product being performed by an orphaned
 *   loop reduction construct. Scoping of the reduction variable is critical.
 * AUTHOR: Blaise Barney 5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100

float a[VECLEN], b[VECLEN], sum;

float dotprod()
{
    int i, tid;

    tid = omp_get_thread_num();
    for (i = 0; i < VECLEN; i++) {
        sum = sum + (a[i] * b[i]);
        printf("  tid= %d i=%d\n", tid, i);
    }
    return (sum);
}

int main(int argc, char *argv[])
{
    int i;

    for (i = 0; i < VECLEN; i++)
        a[i] = b[i] = 1.0 * i;
    sum = 0.0;
    sum = dotprod();

    printf("Sum = %f\n", sum);
    return 0;
}
/******************************************************************************
 * FILE: omp_orphan.c
 * DESCRIPTION:
 *   OpenMP Example - Parallel region with an orphaned directive - C/C++ Version
 *   This example demonstrates a dot product being performed by an orphaned
 *   loop reduction construct. Scoping of the reduction variable is critical.
 * AUTHOR: Blaise Barney 5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define VECLEN 100

float a[VECLEN], b[VECLEN], sum;

float dotprod()
{
    int i, tid;

    tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
    for (i = 0; i < VECLEN; i++) {
        sum = sum + (a[i] * b[i]);
        printf("  tid= %d i=%d\n", tid, i);
    }
    return (sum);
}

int main(int argc, char *argv[])
{
    int i;

    for (i = 0; i < VECLEN; i++)
        a[i] = b[i] = 1.0 * i;
    sum = 0.0;

#pragma omp parallel
    sum = dotprod();

    printf("Sum = %f\n", sum);
    return 0;
}
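The point of the example above is that the omp for reduction(+:sum) inside dotprod() is orphaned: it binds to whichever parallel region is active in its caller, so sum must be a variable that is shared at that region, which is why it is a file-scope global here. For contrast, a sketch of the same dot product with the worksharing construct placed directly inside the parallel region, which lets sum be a local instead:

#include <stdio.h>
#define VECLEN 100

int main(void)
{
    float a[VECLEN], b[VECLEN], sum = 0.0f;
    int i;

    for (i = 0; i < VECLEN; i++)
        a[i] = b[i] = 1.0f * i;

    /* combined construct: no orphaning, so 'sum' need not be global */
#pragma omp parallel for reduction(+:sum)
    for (i = 0; i < VECLEN; i++)
        sum = sum + (a[i] * b[i]);

    printf("Sum = %f\n", sum);
    return 0;
}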
cloudkeychain_fmt_plug.c
/* 1Password Cloud Keychain cracker patch for JtR. Hacked together during * April of 2013 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru.kholia at gmail.com>, * Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and Copyright (c) 2012 * magnum, and it is hereby released to the general public under the following * terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is based on "onepasswordpy" project but no actual code is * borrowed from it. * * "onepasswordpy" project is at https://github.com/Roguelazer/onepasswordpy */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cloud_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_cloud_keychain); #else #include <string.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "stdint.h" #include "sha2.h" #include "pbkdf2_hmac_sha512.h" #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 1 #endif #include "memdbg.h" #define FORMAT_LABEL "cloudkeychain" #define FORMAT_NAME "1Password Cloud Keychain" #ifdef MMX_COEF_SHA512 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define HASH_LENGTH 64 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef MMX_COEF_SHA512 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SALTLEN 32 #define IVLEN 16 #define CTLEN 2048 #define EHMLEN 32 #define PAD_SIZE 128 static struct fmt_tests cloud_keychain_tests[] = { 
{"$cloudkeychain$16$2e57e8b57eda4d99df2fe02324960044$227272$336$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$256$16$881d65af6b863f6678d484ff551bc843$272$a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b$32$6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$304$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b", "fred"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned int saltlen; unsigned char salt[SALTLEN]; unsigned int iterations; unsigned int masterkeylen; unsigned char masterkey[CTLEN]; unsigned int plaintextlen; unsigned int ivlen; unsigned char iv[32]; unsigned int cryptextlen; unsigned char cryptext[CTLEN]; unsigned int expectedhmaclen; unsigned char expectedhmac[EHMLEN]; unsigned int hmacdatalen; unsigned char hmacdata[CTLEN]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); cracked = mem_calloc_tiny(sizeof(*cracked) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int isdecu(char *q) { char buf[24]; unsigned int x = atoi(q); /* this is how it is 'used', atoi() to unsigned */ sprintf(buf, "%u", x); return !strcmp(q,buf); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int len; if (strncmp(ciphertext, "$cloudkeychain$", 15) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 15; if ((p = strtok(ctcopy, "$")) == NULL) /* salt length */ goto err; len = atoi(p); if ((p = strtok(NULL, 
"$")) == NULL) /* salt */ goto err; if (!ishex(p)) goto err; if(strlen(p) != len * 2) /* validates salt_len also */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* iterations */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* masterkey length */ goto err; len = atoi(p); if ((p = strtok(NULL, "$")) == NULL) /* masterkey */ goto err; if (!ishex(p)) goto err; if(strlen(p) != len * 2) /* validates masterkey_len also */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* plaintext length */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv length */ goto err; len = atoi(p); if(len > IVLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv */ goto err; if(strlen(p) != len * 2) /* validates iv_len */ goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* cryptext length */ goto err; len = atoi(p); if (len > CTLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* cryptext */ goto err; if (!ishex(p)) goto err; if(strlen(p) != len * 2) /* validates cryptext_len */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* expectedhmac length */ goto err; len = atoi(p); if (len > EHMLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* expectedhmac */ goto err; if (!ishex(p)) goto err; if(strlen(p) != len * 2) /* validates expectedhmac_len */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* hmacdata length */ goto err; len = atoi(p); if (len > CTLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* hmacdata */ goto err; if (!ishex(p)) goto err; if(strlen(p) != len * 2) /* validates hmacdata_len */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 15; /* skip over "$cloudkeychain$" */ p = strtok(ctcopy, "$"); cs.saltlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.saltlen; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.iterations = atoi(p); p = strtok(NULL, "$"); cs.masterkeylen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.masterkeylen; i++) cs.masterkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.plaintextlen = atoi(p); p = strtok(NULL, "$"); cs.ivlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.ivlen; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.cryptextlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.cryptextlen; i++) cs.cryptext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.expectedhmaclen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.expectedhmaclen; i++) cs.expectedhmac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.hmacdatalen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.hmacdatalen; i++) cs.hmacdata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void hmac_sha256(uint8_t * pass, uint8_t passlen, uint8_t * salt, uint32_t saltlen, uint32_t add, uint64_t * ret) { uint8_t i, ipad[64], opad[64]; SHA256_CTX ctx; memset(ipad, 0x36, 64); memset(opad, 0x5c, 64); for (i = 0; i < passlen; i++) { 
ipad[i] ^= pass[i]; opad[i] ^= pass[i]; } SHA256_Init(&ctx); SHA256_Update(&ctx, ipad, 64); SHA256_Update(&ctx, salt, saltlen); if (add > 0) { #if ARCH_LITTLE_ENDIAN add = JOHNSWAP(add); #endif SHA256_Update(&ctx, &add, 4); } SHA256_Final((uint8_t *) ret, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, opad, 64); SHA256_Update(&ctx, (uint8_t *) ret, 32); SHA256_Final((uint8_t *) ret, &ctx); } static int ckcdecrypt(unsigned char *key) { uint64_t tmp[8]; hmac_sha256(key + 32, 32, cur_salt->hmacdata, cur_salt->hmacdatalen, 0, tmp); if (!memcmp(tmp, cur_salt->expectedhmac, 32)) return 1; else return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; uint64_t key[SSE_GROUP_SZ_SHA512][8]; union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = (ARCH_WORD_32*)(key[i]); } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0); for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) cracked[index+i] = ckcdecrypt((unsigned char*)(key[i])); #else uint64_t key[8]; pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, (unsigned char*)key, HASH_LENGTH, 0); cracked[index] = ckcdecrypt((unsigned char*)key); #endif } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void cloud_keychain_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } #endif struct fmt_main fmt_cloud_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif cloud_keychain_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, cloud_keychain_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
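A detail worth noting in init() above: the batch size is scaled so that a single crypt_all() call carries enough candidate keys for every OpenMP thread, with OMP_SCALE extra batches per thread to absorb load imbalance between slow PBKDF2 computations. A minimal sketch of that sizing rule (the function name is illustrative):

#include <omp.h>

/* max keys per crypt = base * threads * OMP_SCALE, as in init() above */
static int scaled_max_keys(int base_keys_per_crypt, int omp_scale)
{
	int omp_t = omp_get_max_threads();
	return base_keys_per_crypt * omp_t * omp_scale;
}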
#if FMT_EXTERNS_H extern struct fmt_main fmt_cloud_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_cloud_keychain); #else #include <string.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "stdint.h" #include "sha2.h" #include "pbkdf2_hmac_sha512.h" #include "memdbg.h" #define FORMAT_LABEL "cloudkeychain" #define FORMAT_NAME "1Password Cloud Keychain" #ifdef MMX_COEF_SHA512 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define HASH_LENGTH 64 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef MMX_COEF_SHA512 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SALTLEN 32 #define IVLEN 16 #define CTLEN 2048 #define EHMLEN 32 #define PAD_SIZE 128 static struct fmt_tests cloud_keychain_tests[] = { {"$cloudkeychain$16$2e57e8b57eda4d99df2fe02324960044$227272$336$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$256$16$881d65af6b863f6678d484ff551bc843$272$a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b$32$6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$304$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b", "fred"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned int saltlen; unsigned char salt[SALTLEN]; unsigned int iterations; unsigned int masterkeylen; unsigned char masterkey[CTLEN]; unsigned int plaintextlen; unsigned int ivlen; unsigned char iv[32]; unsigned int cryptextlen; unsigned 
char cryptext[CTLEN]; unsigned int expectedhmaclen; unsigned char expectedhmac[EHMLEN]; unsigned int hmacdatalen; unsigned char hmacdata[CTLEN]; } *cur_salt; static void init(struct fmt_main *self) { saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); cracked = mem_calloc_tiny(sizeof(*cracked) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int isdecu(char *q) { char buf[24]; unsigned int x = atoi(q); /* this is how it is 'used', atoi() to * unsigned */ sprintf(buf, "%u", x); return !strcmp(q, buf); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int len; if (strncmp(ciphertext, "$cloudkeychain$", 15) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 15; if ((p = strtok(ctcopy, "$")) == NULL) /* salt length */ goto err; len = atoi(p); if ((p = strtok(NULL, "$")) == NULL) /* salt */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates salt_len also */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* iterations */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* masterkey length */ goto err; len = atoi(p); if ((p = strtok(NULL, "$")) == NULL) /* masterkey */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates masterkey_len also */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* plaintext length */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv length */ goto err; len = atoi(p); if (len > IVLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv */ goto err; if (strlen(p) != len * 2) /* validates iv_len */ goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* cryptext length */ goto err; len = atoi(p); if (len > CTLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* cryptext */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates cryptext_len */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* expectedhmac length */ goto err; len = atoi(p); if (len > EHMLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* expectedhmac */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates expectedhmac_len */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* hmacdata length */ goto err; len = atoi(p); if (len > CTLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* hmacdata */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates hmacdata_len */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 15; /* skip over "$cloudkeychain$" */ p = strtok(ctcopy, "$"); cs.saltlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.saltlen; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.iterations = atoi(p); p = strtok(NULL, "$"); cs.masterkeylen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.masterkeylen; i++) cs.masterkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.plaintextlen = atoi(p); p = strtok(NULL, "$"); cs.ivlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.ivlen; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + 
atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.cryptextlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.cryptextlen; i++) cs.cryptext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.expectedhmaclen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.expectedhmaclen; i++) cs.expectedhmac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.hmacdatalen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.hmacdatalen; i++) cs.hmacdata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void hmac_sha256(uint8_t * pass, uint8_t passlen, uint8_t * salt, uint32_t saltlen, uint32_t add, uint64_t * ret) { uint8_t i, ipad[64], opad[64]; SHA256_CTX ctx; memset(ipad, 0x36, 64); memset(opad, 0x5c, 64); for (i = 0; i < passlen; i++) { ipad[i] ^= pass[i]; opad[i] ^= pass[i]; } SHA256_Init(&ctx); SHA256_Update(&ctx, ipad, 64); SHA256_Update(&ctx, salt, saltlen); if (add > 0) { #if ARCH_LITTLE_ENDIAN add = JOHNSWAP(add); #endif SHA256_Update(&ctx, &add, 4); } SHA256_Final((uint8_t *) ret, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, opad, 64); SHA256_Update(&ctx, (uint8_t *) ret, 32); SHA256_Final((uint8_t *) ret, &ctx); } static int ckcdecrypt(unsigned char *key) { uint64_t tmp[8]; hmac_sha256(key + 32, 32, cur_salt->hmacdata, cur_salt->hmacdatalen, 0, tmp); if (!memcmp(tmp, cur_salt->expectedhmac, 32)) return 1; else return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; uint64_t key[SSE_GROUP_SZ_SHA512][8]; union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[index + i]); pin[i] = (unsigned char *)saved_key[index + i]; x.pout[i] = (ARCH_WORD_32 *) (key[i]); } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0); for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) cracked[index + i] = ckcdecrypt((unsigned char *)(key[i])); #else uint64_t key[8]; pbkdf2_sha512((const unsigned char *)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, (unsigned char *)key, HASH_LENGTH, 0); cracked[index] = ckcdecrypt((unsigned char *)key); #endif } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void cloud_keychain_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char * get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } #endif struct fmt_main fmt_cloud_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, 
BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif cloud_keychain_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, cloud_keychain_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
#if FMT_EXTERNS_H extern struct fmt_main fmt_cloud_keychain; #elif FMT_REGISTERS_H john_register_one(&fmt_cloud_keychain); #else #include <string.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "stdint.h" #include "sha2.h" #include "pbkdf2_hmac_sha512.h" #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 1 #endif #include "memdbg.h" #define FORMAT_LABEL "cloudkeychain" #define FORMAT_NAME "1Password Cloud Keychain" #ifdef MMX_COEF_SHA512 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define HASH_LENGTH 64 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #ifdef MMX_COEF_SHA512 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA512 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define SALTLEN 32 #define IVLEN 16 #define CTLEN 2048 #define EHMLEN 32 #define PAD_SIZE 128 static struct fmt_tests cloud_keychain_tests[] = { {"$cloudkeychain$16$2e57e8b57eda4d99df2fe02324960044$227272$336$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$256$16$881d65af6b863f6678d484ff551bc843$272$a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b$32$6fde10044103924d8275bf9bfadc98540ae61c5e59be06c5bca981460345bd29$304$6f706461746130310001000000000000881d65af6b863f6678d484ff551bc843a95faf289b914e570a1993353789b66a9c6bd40b42c588923e8869862339d06ef3d5c091c0ba997a704619b3ffc121b4b126071e9e0a0812f722f95a2d7b80c22bc91fc237cb3dfaba1bee1c9d3cb4c94332335ab203bb0f07ca774c19729ce8182f91cd228ae18fb82b17535ecae012f14904a6ace90d9bab1d934eb957ea98a68b4b2db3c8e02d27f7aff9203cdbd91c2b7c6aaa6f9c2ca3c1d5f976fc9ed86b80082ae3e39c2f30a35d26c2c14dbd64386be9b5ae40851824dc5963b54703ba17d20b424deaaa452793a1ef8418db2dda669b064075e450404a46433f6533dfe0a13b34fa1f55238ffea5062a4f22e821b9e99639c9d0ece27df65caf0aaaad7200b0187e7b3134107e38582ef73b", "fred"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static struct custom_salt { unsigned int saltlen; unsigned char salt[SALTLEN]; unsigned int iterations; unsigned int masterkeylen; unsigned char 
masterkey[CTLEN]; unsigned int plaintextlen; unsigned int ivlen; unsigned char iv[32]; unsigned int cryptextlen; unsigned char cryptext[CTLEN]; unsigned int expectedhmaclen; unsigned char expectedhmac[EHMLEN]; unsigned int hmacdatalen; unsigned char hmacdata[CTLEN]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); cracked = mem_calloc_tiny(sizeof(*cracked) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int isdecu(char *q) { char buf[24]; unsigned int x = atoi(q); /* this is how it is 'used', atoi() to * unsigned */ sprintf(buf, "%u", x); return !strcmp(q, buf); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int len; if (strncmp(ciphertext, "$cloudkeychain$", 15) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 15; if ((p = strtok(ctcopy, "$")) == NULL) /* salt length */ goto err; len = atoi(p); if ((p = strtok(NULL, "$")) == NULL) /* salt */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates salt_len also */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* iterations */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* masterkey length */ goto err; len = atoi(p); if ((p = strtok(NULL, "$")) == NULL) /* masterkey */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates masterkey_len also */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* plaintext length */ goto err; if (!isdecu(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv length */ goto err; len = atoi(p); if (len > IVLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* iv */ goto err; if (strlen(p) != len * 2) /* validates iv_len */ goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "$")) == NULL) /* cryptext length */ goto err; len = atoi(p); if (len > CTLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* cryptext */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates cryptext_len */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* expectedhmac length */ goto err; len = atoi(p); if (len > EHMLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* expectedhmac */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates expectedhmac_len */ goto err; if ((p = strtok(NULL, "$")) == NULL) /* hmacdata length */ goto err; len = atoi(p); if (len > CTLEN || len < 0) goto err; if ((p = strtok(NULL, "$")) == NULL) /* hmacdata */ goto err; if (!ishex(p)) goto err; if (strlen(p) != len * 2) /* validates hmacdata_len */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 15; /* skip over "$cloudkeychain$" */ p = strtok(ctcopy, "$"); cs.saltlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.saltlen; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.iterations = atoi(p); p = strtok(NULL, "$"); cs.masterkeylen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.masterkeylen; i++) 
cs.masterkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.plaintextlen = atoi(p); p = strtok(NULL, "$"); cs.ivlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.ivlen; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.cryptextlen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.cryptextlen; i++) cs.cryptext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.expectedhmaclen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.expectedhmaclen; i++) cs.expectedhmac[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "$"); cs.hmacdatalen = atoi(p); p = strtok(NULL, "$"); for (i = 0; i < cs.hmacdatalen; i++) cs.hmacdata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void hmac_sha256(uint8_t * pass, uint8_t passlen, uint8_t * salt, uint32_t saltlen, uint32_t add, uint64_t * ret) { uint8_t i, ipad[64], opad[64]; SHA256_CTX ctx; memset(ipad, 0x36, 64); memset(opad, 0x5c, 64); for (i = 0; i < passlen; i++) { ipad[i] ^= pass[i]; opad[i] ^= pass[i]; } SHA256_Init(&ctx); SHA256_Update(&ctx, ipad, 64); SHA256_Update(&ctx, salt, saltlen); if (add > 0) { #if ARCH_LITTLE_ENDIAN add = JOHNSWAP(add); #endif SHA256_Update(&ctx, &add, 4); } SHA256_Final((uint8_t *) ret, &ctx); SHA256_Init(&ctx); SHA256_Update(&ctx, opad, 64); SHA256_Update(&ctx, (uint8_t *) ret, 32); SHA256_Final((uint8_t *) ret, &ctx); } static int ckcdecrypt(unsigned char *key) { uint64_t tmp[8]; hmac_sha256(key + 32, 32, cur_salt->hmacdata, cur_salt->hmacdatalen, 0, tmp); if (!memcmp(tmp, cur_salt->expectedhmac, 32)) return 1; else return 0; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; uint64_t key[SSE_GROUP_SZ_SHA512][8]; union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[index + i]); pin[i] = (unsigned char *)saved_key[index + i]; x.pout[i] = (ARCH_WORD_32 *) (key[i]); } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0); for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) cracked[index + i] = ckcdecrypt((unsigned char *)(key[i])); #else uint64_t key[8]; pbkdf2_sha512((const unsigned char *)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, (unsigned char *)key, HASH_LENGTH, 0); cracked[index] = ckcdecrypt((unsigned char *)key); #endif } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void cloud_keychain_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char * 
get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } #endif struct fmt_main fmt_cloud_keychain = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif cloud_keychain_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, cloud_keychain_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
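The hand-rolled hmac_sha256() in this format is the textbook ipad/opad construction, valid here because the key is always 32 bytes, well under the 64-byte SHA-256 block size (a longer key would first have to be hashed down). A standalone sketch of the same construction, assuming OpenSSL's low-level SHA-256 interface in place of JtR's sha2.h:

#include <string.h>
#include <stdint.h>
#include <openssl/sha.h>

/* HMAC-SHA256 for keylen <= 64: out = H(opad || H(ipad || data)) */
static void hmac_sha256_sketch(const uint8_t *key, size_t keylen,
                               const uint8_t *data, size_t datalen,
                               uint8_t out[32])
{
	uint8_t ipad[64], opad[64], inner[32];
	SHA256_CTX ctx;
	size_t i;

	memset(ipad, 0x36, 64);
	memset(opad, 0x5c, 64);
	for (i = 0; i < keylen; i++) {
		ipad[i] ^= key[i];
		opad[i] ^= key[i];
	}
	SHA256_Init(&ctx);              /* inner = SHA256(ipad || data) */
	SHA256_Update(&ctx, ipad, 64);
	SHA256_Update(&ctx, data, datalen);
	SHA256_Final(inner, &ctx);

	SHA256_Init(&ctx);              /* out = SHA256(opad || inner) */
	SHA256_Update(&ctx, opad, 64);
	SHA256_Update(&ctx, inner, 32);
	SHA256_Final(out, &ctx);
}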
diagsm_x_coo_u_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { alpha_mul(y[index2(c, r, ldy)], alpha, x[index2(c, r, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO * A, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { alpha_mul(y[index2(c, r, ldy)], alpha, x[index2(c, r, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO * A, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { alpha_mul(y[index2(c, r, ldy)], alpha, x[index2(c, r, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef 
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. 
ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return isVisible(Old) || New->isExternallyVisible(); } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
  /// \brief RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
    }

    ~SynthesizedFunctionScope() {
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };
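  // Illustrative sketch (editorial addition): synthesizing the body of an
  // implicit special member. `S` and `Ctor` are assumed to be a Sema instance
  // and the CXXConstructorDecl being defined.
  //
  //   {
  //     Sema::SynthesizedFunctionScope Scope(S, Ctor);
  //     // Build the body while function and evaluation contexts are active.
  //   } // contexts are popped automatically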
  /// WeakUndeclaredIdentifiers - Identifiers contained in a \#pragma weak
  /// before they are declared. These are rare and may alias another
  /// identifier, declared or undeclared.
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in a
  /// \#pragma redefine_extname before they are declared. Used in Solaris
  /// system headers to define functions that occur in multiple standards to
  /// call the version in the currently selected standard.
  llvm::DenseMap<IdentifierInfo *, AsmLabelAttr *> ExtnameUndeclaredIdentifiers;

  /// \brief Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl *, 2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to look up file-scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// \brief The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// \brief The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// \brief The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// \brief Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// \brief The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// \brief The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// \brief Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// \brief Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// \brief The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// \brief Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// \brief The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// \brief The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// \brief The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// \brief The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// \brief The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// \brief id<NSCopying> type.
  QualType QIDNSCopying;

  /// \brief Will hold 'respondsToSelector:'.
  Selector RespondsToSelectorSel;

  /// \brief Counter for internal MS Asm label names.
  unsigned MSAsmLabelNameCounter;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields.
  bool AllowAbstractFieldReference;

  /// \brief Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum ExpressionEvaluationContext {
    /// \brief The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// \brief The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// \brief The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// \brief The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// \brief The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };
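  // Illustrative sketch (editorial addition): which evaluation context the
  // contained expression lands in for a few common constructs.
  //
  //   sizeof(f());             // f() is an unevaluated operand: Unevaluated
  //   case kSomeConstant:      // the case value is ConstantEvaluated
  //   int x = g();             // g() is PotentiallyEvaluated
  //   void h(int n = cost());  // cost() is PotentiallyEvaluatedIfUsed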
  /// \brief Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// \brief The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// \brief Whether the enclosing context needed a cleanup.
    bool ParentNeedsCleanups;

    /// \brief Whether we are in a decltype expression.
    bool IsDecltype;

    /// \brief The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// \brief The number of typos encountered during this expression
    /// evaluation context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    llvm::SmallPtrSet<Expr *, 2> SavedMaybeODRUseExprs;

    /// \brief The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// \brief The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// \brief The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;

    /// \brief If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// \brief If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      bool ParentNeedsCleanups,
                                      Decl *ManglingContextDecl,
                                      bool IsDecltype)
        : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
          IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
          NumTypos(0), ManglingContextDecl(ManglingContextDecl),
          MangleNumbering() {}

    /// \brief Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == Unevaluated || Context == UnevaluatedAbstract;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// \brief Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *
  getCurrentMangleNumberContext(const DeclContext *DC,
                                Decl *&ManglingContextDecl);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
  public:
    enum Kind { NoMemberOrDeleted, Ambiguous, Success };

  private:
    llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;

  public:
    SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// \brief A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

  /// \brief A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl *, llvm::APInt> FlagBitsCache;

  /// \brief The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// \brief The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;
  /// \brief A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedButUsed - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation>> &Undefined);

  /// Retrieves the list of suspicious delete-expressions that will be checked
  /// at the end of the translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl *, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S)
        : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() { S.FPFeatures.fp_contract = OldFPContractState; }

  private:
    Sema &S;
    bool OldFPContractState : 1;
  };
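  // Illustrative sketch (editorial addition): saving and restoring the
  // FP_CONTRACT state around a compound statement, as a parser-side caller
  // would. `S` is an assumed Sema instance.
  //
  //   {
  //     Sema::FPContractStateRAII SaveFP(S);
  //     // Parse the compound statement; a `#pragma STDC FP_CONTRACT` inside
  //     // only affects this scope.
  //   } // prior FP_CONTRACT state restored here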
  /// Records and restores the vtordisp state on entry/exit of C++ method body.
  class VtorDispStackRAII {
  public:
    VtorDispStackRAII(Sema &S, bool ShouldSaveAndRestore)
        : S(S), ShouldSaveAndRestore(ShouldSaveAndRestore), OldVtorDispStack() {
      if (ShouldSaveAndRestore)
        OldVtorDispStack = S.VtorDispModeStack;
    }
    ~VtorDispStackRAII() {
      if (ShouldSaveAndRestore)
        S.VtorDispModeStack = OldVtorDispStack;
    }

  private:
    Sema &S;
    bool ShouldSaveAndRestore;
    SmallVector<MSVtorDispAttr::Mode, 2> OldVtorDispStack;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource *getExternalSource() const { return ExternalSource; }

  /// \brief Registers an external source. If an external source already
  /// exists, creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive())
        return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };
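  // Illustrative sketch (editorial addition): SemaDiagnosticBuilder is almost
  // always obtained via Sema::Diag (declared just below) and driven by
  // streaming; the diagnostic is emitted when the temporary is destroyed.
  // The diagnostic ID here is a placeholder, not a specific real one.
  //
  //   S.Diag(Loc, diag::err_some_diagnostic) << SomeDecl << SomeSourceRange;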
  /// \brief Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// \brief Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD);

  /// \brief Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// \brief Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// \brief Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// \brief Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  void ActOnEndOfTranslationUnit();

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// \brief This is used to inform Sema what the current
  /// TemplateParameterDepth is during parsing. Currently it is used to pass
  /// on the depth when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K);

  void
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       const BlockExpr *blkExpr = nullptr);

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const {
    if (FunctionScopes.empty())
      return nullptr;

    for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
      if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
        continue;
      return FunctionScopes[e];
    }
    return nullptr;
  }

  template <typename ExprT>
  void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead = true) {
    if (!isUnevaluatedContext())
      getCurFunction()->recordUseOfWeak(E, IsRead);
  }

  void PushCompoundScope();
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// \brief Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// \brief Retrieve the current lambda scope info, if any.
  sema::LambdaScopeInfo *getCurLambda();

  /// \brief Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// \brief Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);
  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T, SourceLocation Loc,
                            DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc,
                              DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

  /// \brief Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc, DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T, SourceLocation Loc,
                                 DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
  TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                                 TypeSourceInfo *ReturnTypeInfo);
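  // Illustrative sketch (editorial addition): driving BuildFunctionType with
  // freshly instantiated types. `S`, `RetTy`, `Loc`, and `Name` are assumed
  // to be in scope; the EPI is default-initialized for brevity.
  //
  //   FunctionProtoType::ExtProtoInfo EPI;
  //   SmallVector<QualType, 4> ParamTys;
  //   ParamTys.push_back(S.Context.IntTy);
  //   QualType FnTy = S.BuildFunctionType(RetTy, ParamTys, Loc, Name, EPI);
  //   // FnTy is a FunctionProtoType on success, or a null type on error.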
  /// \brief Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);

  CanThrowResult canThrow(const Expr *E);
  const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                                const FunctionProtoType *FPT);
  void UpdateExceptionSpec(FunctionDecl *FD,
                           const FunctionProtoType::ExceptionSpecInfo &ESI);
  bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
  bool CheckDistantExceptionSpec(QualType T);
  bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
  bool CheckEquivalentExceptionSpec(
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool CheckEquivalentExceptionSpec(
      const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc,
      bool *MissingExceptionSpecification = nullptr,
      bool *MissingEmptyExceptionSpecification = nullptr,
      bool AllowNoexceptAllMatchWithNoSpec = false,
      bool IsOperatorNew = false);
  bool CheckExceptionSpecSubset(
      const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
      const FunctionProtoType *Superset, SourceLocation SuperLoc,
      const FunctionProtoType *Subset, SourceLocation SubLoc);
  bool CheckParamExceptionSpec(
      const PartialDiagnostic &NoteID, const FunctionProtoType *Target,
      SourceLocation TargetLoc, const FunctionProtoType *Source,
      SourceLocation SourceLoc);

  TypeResult ActOnTypeName(Scope *S, Declarator &D);

  /// \brief The parser has parsed the context-sensitive type 'instancetype'
  /// in an Objective-C message declaration. Return the appropriate type.
  ParsedType ActOnObjCInstanceType(SourceLocation Loc);

  /// \brief Abstract class used to diagnose incomplete types.
  struct TypeDiagnoser {
    bool Suppressed;

    TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) {}

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char *getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

  template <typename... Ts>
  class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              llvm::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      if (Suppressed)
        return;
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, llvm::index_sequence_for<Ts...>());
      DB << T;
    }
  };
private:
  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser &Diagnoser);

  VisibleModuleSet VisibleModules;
  llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;

  Module *CachedFakeTopLevelModule;

public:
  /// \brief Get the module owning an entity.
  Module *getOwningModule(Decl *Entity);

  /// \brief Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);

  bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }
  bool hasVisibleMergedDefinition(NamedDecl *Def);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl *>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if \p A and \p B are equivalent internal linkage declarations
  /// from different modules, and thus an ambiguity error can be downgraded to
  /// an extension warning.
  bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                              const NamedDecl *B);
  void diagnoseEquivalentInternalLinkageDeclarations(
      SourceLocation Loc, const NamedDecl *D,
      ArrayRef<const NamedDecl *> Equiv);

  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
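  // Illustrative sketch (editorial addition): the variadic RequireCompleteType
  // overload binds extra diagnostic arguments through BoundTypeDiagnoser.
  // The diagnostic ID is a placeholder, and `S`, `Loc`, `T`, `Range` are
  // assumed to be in scope.
  //
  //   if (S.RequireCompleteType(Loc, T, diag::err_some_incomplete_type, Range))
  //     return true; // type was incomplete; a diagnostic was already emitted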
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  struct SkipBodyInfo {
    SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
    bool ShouldSkip;
    NamedDecl *Previous;
  };

  /// List of decls defined in a function prototype. This contains
  /// EnumConstants that incorrectly end up in translation unit scope because
  /// there is no function to pin them on. ActOnFunctionDeclarator reads this
  /// list and patches them into the FunctionDecl.
  std::vector<NamedDecl *> DeclsInPrototypeScope;

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false, bool HasTrailingDot = false,
                         ParsedType ObjectType = ParsedType(),
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                               Scope *S, CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool AllowClassTemplates = false);

  /// \brief For compatibility with MSVC, we delay parsing of some default
  /// template type arguments until instantiation time. Emits a warning and
  /// returns a synthesized DependentNameType that isn't really dependent on
  /// any other template arguments.
  ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
                                            SourceLocation NameLoc);
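  // Illustrative sketch (editorial addition): resolving an identifier to a
  // type the way the parser does when it sees a potential type-name. `S`,
  // `II`, `NameLoc`, `CurScope`, and `SS` are assumed to be in scope.
  //
  //   if (ParsedType T = S.getTypeName(*II, NameLoc, CurScope, &SS)) {
  //     // II names a type in this scope; annotate the token with T.
  //   }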
  /// \brief Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate
  };

  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;
    const IdentifierInfo *Keyword;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *Keyword)
        : Kind(NC_Keyword), Keyword(Keyword) {}

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };
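  // Illustrative sketch (editorial addition): how a parser-side caller might
  // consume a NameClassification returned by ClassifyName (declared below).
  // `S`, `CurScope`, `SS`, `Name`, `NameLoc`, and `NextTok` are assumptions.
  //
  //   Sema::NameClassification Classification =
  //       S.ClassifyName(CurScope, SS, Name, NameLoc, NextTok,
  //                      /*IsAddressOfOperand=*/false);
  //   switch (Classification.getKind()) {
  //   case Sema::NC_Type:       /* annotate with Classification.getType() */ break;
  //   case Sema::NC_Expression: /* use Classification.getExpression() */ break;
  //   default: break;
  //   }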
  /// \brief Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification
  ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
               SourceLocation NameLoc, const Token &NextToken,
               bool IsAddressOfOperand,
               std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name, SourceLocation Loc);
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  void CheckShadow(Scope *S, VarDecl *D, const LookupResult &R);
  void CheckShadow(Scope *S, VarDecl *D);
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl *ActOnTypedefDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl *ActOnTypedefNameDecl(Scope *S, DeclContext *DC,
                                  TypedefNameDecl *D, LookupResult &Previous,
                                  bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);

  // Returns true if the variable declaration is a redeclaration.
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  void CheckCompleteVariableDeclaration(VarDecl *var);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl *ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
  bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(
      CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  void NoteHiddenVirtualMethods(
      CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);

  // Returns true if the function declaration is a redeclaration.
  bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                LookupResult &Previous,
                                bool IsExplicitSpecialization);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                            bool TypeMayContainAuto);
  void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
  void ActOnInitializerError(Decl *Dcl);
  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);

  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
                                      bool TypeMayContainAuto = true);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
      SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                                MultiTemplateParamsArg TemplateParamLists,
                                SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                                SkipBodyInfo *SkipBody = nullptr);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); }

  /// \brief Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about emitting
  /// code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// \brief Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
  bool canSkipFunctionBody(Decl *D);

  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineMethodDef(CXXMethodDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// \brief Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
  void DiagnoseUnusedParameters(ParmVarDecl *const *Begin,
                                ParmVarDecl *const *End);
  /// \brief Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl *const *Begin,
                                              ParmVarDecl *const *End,
                                              QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// \brief Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList,
                              SourceLocation SemiLoc);

  /// \brief The parser has processed a module import declaration.
  ///
  /// \param AtLoc The location of the '@' symbol, if any.
  ///
  /// \param ImportLoc The location of the 'import' keyword.
  ///
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                               ModuleIdPath Path);

  /// \brief The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief Check if a module import may be found in the current context,
  /// and emit an error if not.
  void diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc);

  /// \brief Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators
  /// correspond to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument
  };

  /// \brief Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             bool NeedDefinition, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  /// \brief Retrieve a suitable printing policy.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// \brief Retrieve a suitable printing policy.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);
  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation = false);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                    TagTypeKind NewTag, bool isDefinition,
                                    SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag: 'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration: 'friend struct foo;'
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
                 SourceLocation NameLoc, AttributeList *Attr,
                 AccessSpecifier AS, SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
                 bool &IsDependent, SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                                unsigned TagSpec, SourceLocation TagLoc,
                                CXXScopeSpec &SS, IdentifierInfo *Name,
                                SourceLocation NameLoc, AttributeList *Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                               const CXXScopeSpec &SS, IdentifierInfo *Name,
                               SourceLocation TagLoc, SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);

  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle, AccessSpecifier AS);
  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart, Declarator &D,
                                   Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   AttributeList *MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo, RecordDecl *Record,
                            SourceLocation Loc, bool Mutable,
                            Expr *BitfieldWidth, InClassInitStyle InitStyle,
                            SourceLocation TSSL, AccessSpecifier AS,
                            NamedDecl *PrevDecl, Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                  Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, AttributeList *AttrList);
  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  typedef void *SkippedDefinitionContext;

  /// \brief Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceLocation RBraceLoc);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// \brief Invoked when we must temporarily exit the Objective-C container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy,
                              bool EnumUnderlyingIsImplicit,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          AttributeList *Attrs, SourceLocation EqualLoc,
                          Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
                     SourceLocation RBraceLoc, Decl *EnumDecl,
                     ArrayRef<Decl *> Elements, Scope *S,
                     AttributeList *Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope *S, Decl *D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();
  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// \brief Make the given externally-produced declaration visible at the
  /// top level scope.
  ///
  /// \param D The externally-produced declaration to push.
  ///
  /// \param Name The name of the externally-produced declaration.
  void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
  /// returns true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in
  /// the enclosing namespace set of the context, rather than contained
  /// directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// \brief Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// \brief Don't merge availability attributes at all.
    AMK_None,
    /// \brief Merge availability attributes for a redeclaration, which
    /// requires an exact match.
    AMK_Redeclaration,
    /// \brief Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,
    /// \brief Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };
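  // Illustrative sketch (editorial addition): the usual redeclaration check.
  // `S`, `PrevDecl`, `DC`, and `CurScope` are assumed to be in scope.
  //
  //   if (PrevDecl && S.isDeclInScope(PrevDecl, DC, CurScope)) {
  //     // PrevDecl is a prior declaration in the same scope: treat the new
  //     // declaration as a redeclaration rather than a shadowing one.
  //   }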
  /// Attribute merging methods. Return true if a new attribute was added.
  AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                          IdentifierInfo *Platform,
                                          VersionTuple Introduced,
                                          VersionTuple Deprecated,
                                          VersionTuple Obsoleted,
                                          bool IsUnavailable, StringRef Message,
                                          AvailabilityMergeKind AMK,
                                          unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *
  mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                          TypeVisibilityAttr::VisibilityType Vis,
                          unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *
  mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                         unsigned AttrSpellingListIndex,
                         MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg, unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
                                                IdentifierInfo *Ident,
                                                unsigned AttrSpellingListIndex);
  CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Ident,
                              unsigned AttrSpellingListIndex);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                            LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation.
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };

  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,

    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,

    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };
  OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                             const LookupResult &OldDecls, NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
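  // Illustrative sketch (editorial addition): classifying a new function
  // declaration against previous lookup results. `S`, `CurScope`, `NewFD`,
  // and `Previous` are assumed to be in scope.
  //
  //   NamedDecl *OldDecl = nullptr;
  //   switch (S.CheckOverload(CurScope, NewFD, Previous, OldDecl,
  //                           /*IsForUsingDecl=*/false)) {
  //   case Sema::Ovl_Overload:    /* a genuine overload; add it */ break;
  //   case Sema::Ovl_Match:       /* redeclaration of OldDecl */ break;
  //   case Sema::Ovl_NonFunction: /* conflicts with a non-function */ break;
  //   }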
  /// \brief Checks availability of the function depending on the current
  /// function context. Inside an unavailable function, unavailability is
  /// ignored.
  ///
  /// \returns true if \p FD is unavailable and current context is inside
  /// an available function, false otherwise.
  bool isFunctionConsideredUnavailable(FunctionDecl *FD);

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions,
                        bool AllowExplicit,
                        bool InOverloadResolution,
                        bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution,
                           QualType& ConvertedType, bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType& ConvertedType, bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType& ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                  QualType FromType, QualType ToType);
  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType,
                              CastKind &Kind,
                              CXXCastPath& BasePath,
                              bool IgnoreBaseAccess);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType,
                                 bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind,
                                    CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsNoReturnConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType,
                                             Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value, CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE);
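  /// A minimal usage sketch (illustrative; \c Entity, \c EqualLoc and \c Init
  /// are assumed to describe the initialization being checked):
  /// \code
  ///   ExprResult Res = PerformCopyInitialization(Entity, EqualLoc, Init);
  ///   if (Res.isInvalid())
  ///     return ExprError(); // diagnostics have already been emitted
  ///   Expr *Converted = Res.get();
  ///   (void)Converted;
  /// \endcode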
  /// \brief Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
  class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false,
                                bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// \brief Determine whether the specified type is a valid destination type
    /// for this conversion.
    virtual bool match(QualType T) = 0;

    /// \brief Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder
    diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// \brief Emits a diagnostic when the expression has incomplete class
    /// type.
    virtual SemaDiagnosticBuilder
    diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// \brief Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder diagnoseExplicitConv(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    /// \brief Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder
    noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// \brief Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder
    diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// \brief Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder
    noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// \brief Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder diagnoseConversion(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations,
                        bool Suppress, bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder
    diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// \brief Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder
    diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
  };

  /// Perform a contextual implicit conversion.
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);


  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);
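  /// A minimal sketch of a concrete diagnoser (illustrative only; the
  /// diagnostic ID is a placeholder, not a real entry in DiagnosticSemaKinds,
  /// and the remaining pure virtual overrides are elided):
  /// \code
  ///   struct SizeDiagnoser : ICEConvertDiagnoser {
  ///     SizeDiagnoser()
  ///         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
  ///                               /*Suppress=*/false,
  ///                               /*SuppressConversion=*/false) {}
  ///     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
  ///                                          QualType T) override {
  ///       return S.Diag(Loc, diag::err_placeholder_not_integral) << T;
  ///     }
  ///     // ...override the other diagnose*/note* pure virtuals here...
  ///   } Diagnoser;
  ///   ExprResult Converted =
  ///       PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);
  /// \endcode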
  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;

  void AddOverloadCandidate(FunctionDecl *Function,
                            DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet& CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = false);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                      ArrayRef<Expr *> Args,
                      OverloadCandidateSet &CandidateSet,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                      bool SuppressUserConversions = false,
                      bool PartialOverloading = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl,
                          QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method,
                          DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
  void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
                                    DeclAccessPair FoundDecl,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet& CandidateSet,
                                    bool SuppressUserConversions = false,
                                    bool PartialOverloading = false);
  void AddConversionCandidate(CXXConversionDecl *Conversion,
                              DeclAccessPair FoundDecl,
                              CXXRecordDecl *ActingContext,
                              Expr *From, QualType ToType,
                              OverloadCandidateSet& CandidateSet,
                              bool AllowObjCConversionOnExplicit);
  void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
                                      DeclAccessPair FoundDecl,
                                      CXXRecordDecl *ActingContext,
                                      Expr *From, QualType ToType,
                                      OverloadCandidateSet &CandidateSet,
                                      bool AllowObjCConversionOnExplicit);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                             DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto,
                             Expr *Object, ArrayRef<Expr *> Args,
                             OverloadCandidateSet& CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                   SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet& CandidateSet,
                                   SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                    SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                    OverloadCandidateSet& CandidateSet);
  void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                            SourceLocation Loc,
                                            ArrayRef<Expr *> Args,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                            OverloadCandidateSet& CandidateSet,
                                            bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate
  void NoteOverloadCandidate(FunctionDecl *Fn,
                             QualType DestType = QualType(),
                             bool TakingAddress = false);

  // Emit as a series of 'note's all template and non-templates identified by
  // the expression Expr
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                                 bool TakingAddress = false);
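  /// A minimal sketch of driving overload resolution with these entry points
  /// (illustrative; \c Loc, \c Fns and \c Args are assumed to be in scope,
  /// and the OverloadCandidateSet constructor/BestViableFunction signatures
  /// are taken from Sema/Overload.h of this era):
  /// \code
  ///   OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  ///   for (NamedDecl *ND : Fns)
  ///     if (auto *FD = dyn_cast<FunctionDecl>(ND))
  ///       AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
  ///                            Args, CandidateSet);
  ///   OverloadCandidateSet::iterator Best;
  ///   if (CandidateSet.BestViableFunction(*this, Loc, Best) == OR_Success) {
  ///     FunctionDecl *Chosen = Best->Function;
  ///     (void)Chosen;
  ///   }
  /// \endcode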
  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  // [PossiblyAFunctionType]  -->  [Return]
  // NonFunctionType --> NonFunctionType
  // R (A) --> R(A)
  // R (*)(A) --> R (A)
  // R (&)(A) --> R (A)
  // R (S::*)(A) --> R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                     QualType TargetType,
                                     bool Complain,
                                     DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr,
      bool DoFunctionPointerConverion = false,
      bool Complain = false,
      SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E,
                                       DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection=true);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     unsigned Opc,
                                     const UnresolvedSetImpl &Fns,
                                     Expr *input);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                   unsigned Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *LHS, Expr *RHS);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base, Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Args,
                                       SourceLocation RParenLoc);
  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc,
                                          MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);
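  /// A minimal sketch (illustrative; \c AddrExpr is assumed to be an
  /// address-of expression naming an overload set, and \c FnPtrTy the
  /// function pointer type it is being converted to):
  /// \code
  ///   DeclAccessPair Found;
  ///   if (FunctionDecl *FD = ResolveAddressOfOverloadedFunction(
  ///           AddrExpr, FnPtrTy, /*Complain=*/true, Found)) {
  ///     Expr *Fixed = FixOverloadedFunctionReference(AddrExpr, Found, FD);
  ///     (void)Fixed; // use the now-resolved reference
  ///   }
  /// \endcode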
  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
                                ParmVarDecl *const *ParamEnd,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria is specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// @brief Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,
    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,
    /// Label name lookup.
    LookupLabel,
    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,
    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,
    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,
    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,
    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations. This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,
    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,
    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,
    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,
    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,
    /// \brief Look up any declaration with any name.
    LookupAnyName
  };

  /// \brief Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs. a reference).
  enum RedeclarationKind {
    /// \brief The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,
    /// \brief The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists.
    ForRedeclaration
  };

  /// \brief The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// \brief The lookup resulted in an error.
    LOLR_Error,
    /// \brief The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// \brief The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// \brief The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to be
    /// passed as a non-type template argument pack.
    LOLR_Template,
    /// \brief The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplate
  };

  SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
                                                   CXXSpecialMember SM,
                                                   bool ConstArg,
                                                   bool VolatileArg,
                                                   bool RValueThis,
                                                   bool ConstThis,
                                                   bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
    TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
  };

  /// \brief The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// \brief Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

  /// \brief Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// \brief Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// \brief Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc,
                              LookupNameKind NameKind,
                              RedeclarationKind Redecl
                                = NotForRedeclaration);
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl
                                     = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);
  void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
                                            DeclAccessPair Operator,
                                            QualType T1, QualType T2);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                    ArrayRef<QualType> ArgTys,
                                                    bool AllowRaw,
                                                    bool AllowTemplate,
                                                    bool AllowStringTemplate);
  bool isKnownName(StringRef name);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };
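  /// A minimal sketch of unqualified lookup (illustrative; \c Name, \c Loc
  /// and \c S are assumed to be the name, location, and scope at hand):
  /// \code
  ///   LookupResult R(*this, Name, Loc, LookupOrdinaryName);
  ///   if (LookupName(R, S) && R.isSingleResult()) {
  ///     NamedDecl *D = R.getFoundDecl();
  ///     (void)D; // single, unambiguous result
  ///   }
  /// \endcode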
  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind,
                             Scope *S, CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               std::unique_ptr<CorrectionCandidateCallback> CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// \brief Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult
  CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; });

  ExprResult
  CorrectDelayedTyposInExpr(Expr *E,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                          ArrayRef<Expr *> Args,
                                 AssociatedNamespaceSet &AssociatedNamespaces,
                                 AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
                                 Scope *S, bool ForRedeclaration,
                                 SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
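  /// A minimal sketch of delayed-typo resolution with a filter (illustrative;
  /// \c E is assumed to be an expression that may contain TypoExprs):
  /// \code
  ///   ExprResult Fixed = CorrectDelayedTyposInExpr(
  ///       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
  ///         // Reject rebuilt expressions that are still value-dependent;
  ///         // other correction combinations will then be tried.
  ///         return Rebuilt->isValueDependent() ? ExprError()
  ///                                            : ExprResult(Rebuilt);
  ///       });
  /// \endcode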
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const AttributeList *AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
  bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckNoReturnAttr(const AttributeList &attr);
  bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
                                      unsigned ArgNum, StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType &T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type.
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param nullabilityLoc The location of the nullability specifier.
  ///
  /// \param isContextSensitive Whether this nullability specifier was
  /// written as a context-sensitive keyword (in an Objective-C
  /// method) or an Objective-C property attribute, rather than as an
  /// underscored type specifier.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkNullabilityTypeSpecifier(QualType &type,
                                     NullabilityKind nullability,
                                     SourceLocation nullabilityLoc,
                                     bool isContextSensitive);

  /// \brief Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
  typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is main routine to warn if any method
  /// remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                   ObjCInterfaceDecl *IDecl);
  void DefaultSynthesizeProperties(Scope *S, Decl *D);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If method is a property setter/getter
  /// and the property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                     const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                       SourceLocation AtLoc,
                       SourceLocation LParenLoc,
                       FieldDeclarator &FD,
                       Selector GetterSel,
                       Selector SetterSel,
                       const bool isAssign,
                       const bool isReadWrite,
                       const unsigned Attributes,
                       const unsigned AttributesAsWritten,
                       bool *isOverridingProperty,
                       QualType T,
                       TypeSourceInfo *TSI,
                       tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjcPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       Selector SetterSel,
                                       const bool isAssign,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
                                          const ObjCImplementationDecl *ImplD,
                                          const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// category matches with those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// \brief Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// \brief - Returns instance or factory methods in the global method pool
  /// for the given selector. If no such method, or only one method, is found,
  /// this function returns false; otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                          bool instance);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R,
                                      bool receiverIdOrClass);

  void DiagnoseMultipleMethodInGlobalPool(
                                   SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// \brief - Returns a selector which best matches given argument list or
  /// nullptr if none could be found
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance);

  /// \brief Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                        bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// \brief A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S): S(S) {
      S.ActOnStartOfCompoundStmt();
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };
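  /// A minimal usage sketch (illustrative; \c SemaRef is assumed to be the
  /// current Sema instance):
  /// \code
  ///   {
  ///     Sema::CompoundScopeRAII CompoundScope(SemaRef);
  ///     // ...parse or rebuild the statements of the compound body...
  ///   } // compound scope popped automatically here
  /// \endcode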
  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
                           SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                           SourceLocation DotDotDotLoc, Expr *RHSVal,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  StmtResult ActOnIfStmt(SourceLocation IfLoc,
                         FullExprArg CondVal, Decl *CondVar,
                         Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Expr *Cond,
                                    Decl *CondVar);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
                            FullExprArg Cond,
                            Decl *CondVar, Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc,
                         SourceLocation CondLParen, Expr *Cond,
                         SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First, FullExprArg Second,
                          Decl *SecondVar,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
    BFRK_Check
  };

  StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *LoopVar,
                                  SourceLocation ColonLoc, Expr *Collection,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  SourceLocation ColonLoc,
                                  Stmt *RangeDecl, Stmt *BeginEndDecl,
                                  Expr *Cond, Expr *Inc,
                                  Stmt *LoopVarDecl,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

  StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
                           SourceLocation LabelLoc,
                           LabelDecl *TheDecl);
  StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                   SourceLocation StarLoc,
                                   Expr *DestExp);
  StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
  StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind, unsigned NumParams);
  typedef std::pair<StringRef, QualType> CapturedParamNameType;
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind,
                                ArrayRef<CapturedParamNameType> Params);
  StmtResult ActOnCapturedRegionEnd(Stmt *S);
  void ActOnCapturedRegionError();
  RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                           SourceLocation Loc,
                                           unsigned NumParams);
  VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                   bool AllowFunctionParameters);
  bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                              bool AllowFunctionParameters);

  StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             Scope *CurScope);
  StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
  StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
                                     Expr *RetValExp);

  StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                             bool IsVolatile, unsigned NumOutputs,
                             unsigned NumInputs, IdentifierInfo **Names,
                             MultiExprArg Constraints, MultiExprArg Exprs,
                             Expr *AsmString, MultiExprArg Clobbers,
                             SourceLocation RParenLoc);

  ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       UnqualifiedId &Id,
                                       llvm::InlineAsmIdentifierInfo &Info,
                                       bool IsUnevaluatedContext);
  bool LookupInlineAsmField(StringRef Base, StringRef Member,
                            unsigned &Offset, SourceLocation AsmLoc);
  ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                         unsigned &Offset,
                                         llvm::InlineAsmIdentifierInfo &Info,
                                         SourceLocation AsmLoc);
  StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                            ArrayRef<Token> AsmToks,
                            StringRef AsmString,
                            unsigned NumOutputs, unsigned NumInputs,
                            ArrayRef<StringRef> Constraints,
                            ArrayRef<StringRef> Clobbers,
                            ArrayRef<Expr*> Exprs,
                            SourceLocation EndLoc);
  LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                   SourceLocation Location,
                                   bool AlwaysCreate);

  VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo,
                                  QualType ExceptionType,
                                  SourceLocation StartLoc,
                                  SourceLocation IdLoc,
                                  IdentifierInfo *Id,
                                  bool Invalid = false);

  Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

  StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                                  SourceLocation RParen, Decl *Parm,
                                  Stmt *Body);

  StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

  StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                                MultiStmtArg Catch, Stmt *Finally);

  StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
  StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                  Scope *CurScope);
  ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                            Expr *operand);
  StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                         Expr *SynchExpr, Stmt *SynchBody);

  StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

  VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                     SourceLocation StartLoc,
                                     SourceLocation IdLoc,
                                     IdentifierInfo *Id);

  Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

  StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
                                Decl *ExDecl, Stmt *HandlerBlock);
  StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                              ArrayRef<Stmt *> Handlers);

  StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                              SourceLocation TryLoc, Stmt *TryBlock,
                              Stmt *Handler);
  StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
                                 Expr *FilterExpr,
                                 Stmt *Block);
  void ActOnStartSEHFinallyBlock();
  void ActOnAbortSEHFinallyBlock();
  StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
  StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

  void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

  bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

  /// \brief If it's a file scoped decl that must warn if not used, keep track
  /// of it.
  void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

  /// DiagnoseUnusedExprResult - If the statement passed in is an expression
  /// whose result is unused, warn.
  void DiagnoseUnusedExprResult(const Stmt *S);
  void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
  void DiagnoseUnusedDecl(const NamedDecl *ND);

  /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
  /// statement as a \p Body, and it is located on the same line.
  ///
  /// This helps prevent bugs due to typos, such as:
  ///     if (condition);
  ///       do_stuff();
  void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
                             unsigned DiagID);

  /// Warn if a for/while loop statement \p S, which is followed by
  /// \p PossibleBody, has a suspicious null statement as a body.
  void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

  /// Warn if a value is moved to itself.
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);

  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };

  void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
                               NamedDecl *D, StringRef Message,
                               SourceLocation Loc,
                               const ObjCInterfaceDecl *UnknownObjCClass,
                               const ObjCPropertyDecl *ObjCProperty,
                               bool ObjCPropertyAccess);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);
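  /// User code these helpers are meant to flag (grounded in the comments
  /// above; shown only to illustrate what the diagnostics catch):
  /// \code
  ///   if (condition);   // DiagnoseEmptyStmtBody: suspicious null statement
  ///     do_stuff();
  ///   v = std::move(v); // DiagnoseSelfMove: value moved to itself
  /// \endcode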
  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.

  bool CanUseDecl(NamedDecl *D);
  bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
                         const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
                         bool ObjCPropertyAccess=false);
  void NoteDeletedFunction(FunctionDecl *FD);
  std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       Decl *LambdaContextDecl = nullptr,
                                       bool IsDecltype = false);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       ReuseLambdaContextDecl_t,
                                       bool IsDecltype = false);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);

  // Functions for marking a declaration referenced. These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense). There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool OdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E);
  void MarkMemberReferenced(MemberExpr *E);

  void UpdateMarkingForLValueToRValue(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// \brief Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks whether
  /// the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind,
                          SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                          QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// \brief Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// \brief Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// \brief Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// \brief Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);

  /// \brief Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);

  /// \brief Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);
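  /// A minimal sketch of a capture check using the API above (illustrative;
  /// \c Var and \c Loc are assumed to be the variable and reference location):
  /// \code
  ///   if (!tryCaptureVariable(Var, Loc)) {
  ///     // Capture succeeded (note the inverted return convention);
  ///     // the reference type inside the closure is:
  ///     QualType RefTy = getCapturedDeclRefType(Var, Loc);
  ///     (void)RefTy;
  ///   }
  /// \endcode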
  // Primary Expressions.
  SourceRange getExprRange(Expr *E) const;

  ExprResult ActOnIdExpression(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
      std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
      bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

  void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                              TemplateArgumentListInfo &Buffer,
                              DeclarationNameInfo &NameInfo,
                              const TemplateArgumentListInfo *&TemplateArgs);

  bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                        std::unique_ptr<CorrectionCandidateCallback> CCC,
                        TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                        ArrayRef<Expr *> Args = None,
                        TypoExpr **Out = nullptr);

  ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                                IdentifierInfo *II,
                                bool AllowBuiltinCreation=false);

  ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        bool isAddressOfOperand,
                                const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
                              ExprValueKind VK,
                              SourceLocation Loc,
                              const CXXScopeSpec *SS = nullptr);
  ExprResult
  BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                   const DeclarationNameInfo &NameInfo,
                   const CXXScopeSpec *SS = nullptr,
                   NamedDecl *FoundD = nullptr,
                   const TemplateArgumentListInfo *TemplateArgs = nullptr);
  ExprResult
  BuildAnonymousStructUnionMemberReference(
      const CXXScopeSpec &SS,
      SourceLocation nameLoc,
      IndirectFieldDecl *indirectField,
      DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
      Expr *baseObjectExpr = nullptr,
      SourceLocation opLoc = SourceLocation());

  ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
                                             SourceLocation TemplateKWLoc,
                                             LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                             const Scope *S);
  ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                     bool IsDefiniteInstance,
                                     const Scope *S);
  bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                  const LookupResult &R,
                                  bool HasTrailingLParen);

  ExprResult
  BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                    const DeclarationNameInfo &NameInfo,
                                    bool IsAddressOfOperand, const Scope *S,
                                    TypeSourceInfo **RecoveryTSI = nullptr);

  ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                const DeclarationNameInfo &NameInfo,
                                const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
                                      LookupResult &R,
                                      bool NeedsADL,
                                      bool AcceptInvalidDecl = false);
  ExprResult BuildDeclarationNameExpr(
      const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
      NamedDecl *D, NamedDecl *FoundD = nullptr,
      const TemplateArgumentListInfo *TemplateArgs = nullptr,
      bool AcceptInvalidDecl = false);

  ExprResult BuildLiteralOperatorCall(LookupResult &R,
                      DeclarationNameInfo &SuffixInfo,
                      ArrayRef<Expr *> Args,
                      SourceLocation LitEndLoc,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

  ExprResult BuildPredefinedExpr(SourceLocation Loc,
                                 PredefinedExpr::IdentType IT);
  ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
  ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

  bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

  ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
  ExprResult ActOnCharacterConstant(const Token &Tok,
                                    Scope *UDLScope = nullptr);
  ExprResult ActOnParenExpr(SourceLocation L,
SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union { IdentifierInfo *IdentInfo; Expr *E; } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
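// Illustrative examples (not part of the original header) of the builtins
// handled above; 'A', 'ap', and the values are placeholder names:
//
//   struct A { struct { int c; } b[2][3]; };
//   size_t off = __builtin_offsetof(struct A, b[1][2].c); // .ident / [expr]
//   int ten = __builtin_choose_expr(1, 10, 20);           // picks 10
//   // __builtin_va_arg(ap, int) reads the next 'int' from a va_list 'ap'.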
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo);
IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. 
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); }
public:
explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; }
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; }
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } };
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
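// Illustrative example (not from the original header) of the ordering
// described at the top of ImplicitExceptionSpecification, applied to
// implicit default constructors:
//
//   struct M { M() noexcept(false); };
//   struct A { int i; };  // implicit A::A() stays noexcept in C++11
//   struct B { M m; };    // M::M() relaxes B::B() to noexcept(false)
//
// Each CalledDecl() merges one base or member constructor into ComputedEST,
// only ever moving from more restrictive to less restrictive specifications.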
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
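// Illustrative example (not part of the original header) of the lazy
// declare/define split used by the implicit special members declared
// earlier in this section:
//
//   struct V { int *p; };
//   V a, b;
//   a = std::move(b);  // first use of V's implicitly-declared move
//                      // assignment; roughly, Sema declares it via
//                      // DeclareImplicitMoveAssignment and only defines it
//                      // (DefineImplicitMoveAssignment) once it is used.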
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true);
~CXXThisScopeRAII();
};
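// Usage sketch (inferred only from the declaration above, not a verbatim
// call site): temporarily permitting 'this' while processing a piece of
// class context outside a member function body:
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, /*CXXThisTypeQuals=*/0);
//     // ... 'this' now has the corresponding class type in this scope ...
//   }  // the previous CXXThisTypeOverride is restored on destruction
//
// 'SemaRef' and 'RD' are placeholder names for a Sema instance and a class
// (or class template) declaration.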
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return 'true' on failure, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc);
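// Illustrative examples (placeholder names, not from the original header)
// of the three interpretations listed above for ActOnCXXTypeConstructExpr:
//
//   int a = int(x);              // function-style cast
//   Widget w = Widget(1, 2, 3);  // construction of a class-type temporary
//   int b = int();               // value-initialized temporary; b == 0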
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing a
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc The location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. 
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); 
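// Illustrative examples (placeholder names, not from the original header) of
// the ctor-initializer forms the ActOnMemInitializer overload above (paren
// args) and the one below (braced init-list) accept; EllipsisLoc covers pack
// expansions:
//
//   struct D : B {
//     int n;
//     D() : B(1, 2), n{3} {}  // parenthesized args and braced init-list
//   };
//   template <class... Bs> struct E : Bs... {
//     E() : Bs()... {}        // mem-initializer pack expansion
//   };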
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
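  ///
  /// Illustrative example (a plausible mapping, shown only for orientation;
  /// not taken from this header):
  /// \code
  ///   template<typename T> struct X {            // TPC_ClassTemplate
  ///     template<typename U> friend void f(U);   // TPC_FriendFunctionTemplate
  ///   };
  /// \endcode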
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind 
          NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK,
          SourceLocation PrevPtOfInstantiation, bool &SuppressNew);

  bool CheckDependentFunctionTemplateSpecialization(
      FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs,
      LookupResult &Previous);

  bool CheckFunctionTemplateSpecialization(
      FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
      LookupResult &Previous);
  bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

  DeclResult ActOnExplicitInstantiation(
      Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
      unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
      TemplateTy Template, SourceLocation TemplateNameLoc,
      SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
      SourceLocation RAngleLoc, AttributeList *Attr);

  DeclResult ActOnExplicitInstantiation(
      Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
      unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS,
      IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr);

  DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
      TemplateDecl *Template, SourceLocation TemplateLoc,
      SourceLocation RAngleLoc, Decl *Param,
      SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);

  /// \brief Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// \brief The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// \brief The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// \brief The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// \brief Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
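  ///
  /// A minimal usage sketch (for illustration only; the surrounding caller
  /// context is assumed rather than taken from this header):
  /// \code
  ///   SmallVector<TemplateArgument, 4> Converted;
  ///   if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
  ///                                 /*PartialTemplateArgs=*/false, Converted))
  ///     return true; // a diagnostic has already been emitted
  /// \endcode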
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, SourceLocation TemplateLoc,
                    TemplateTy TemplateName, SourceLocation TemplateNameLoc,
                    SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II, SourceLocation IILoc);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
      TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// \brief The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// \brief An arbitrary expression.
    UPPC_Expression = 0,
    /// \brief The base type of a class type.
    UPPC_BaseType,
    /// \brief The type of an arbitrary declaration.
    UPPC_DeclarationType,
    /// \brief The type of a data member.
    UPPC_DataMemberType,
    /// \brief The size of a bit-field.
    UPPC_BitFieldWidth,
    /// \brief The expression in a static assertion.
    UPPC_StaticAssertExpression,
    /// \brief The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,
    /// \brief The enumerator value.
    UPPC_EnumeratorValue,
    /// \brief A using declaration.
    UPPC_UsingDeclaration,
    /// \brief A friend declaration.
    UPPC_FriendDeclaration,
    /// \brief A declaration qualifier.
    UPPC_DeclarationQualifier,
    /// \brief An initializer.
    UPPC_Initializer,
    /// \brief A default argument.
    UPPC_DefaultArgument,
    /// \brief The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,
    /// \brief The type of an exception.
    UPPC_ExceptionType,
    /// \brief Partial specialization.
    UPPC_PartialSpecialization,
    /// \brief Microsoft __if_exists.
    UPPC_IfExists,
    /// \brief Microsoft __if_not_exists.
    UPPC_IfNotExists,
    /// \brief Lambda expression.
    UPPC_Lambda,
    /// \brief Block expression.
    UPPC_Block
  };

  /// \brief Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                  ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// \brief If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
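    /// For illustration (example not taken from this header):
    /// \code
    ///   template<typename T, typename U> void f(T);
    ///   f(42); // 'U' is neither deduced nor defaulted -> TDK_Incomplete
    /// \endcode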
    TDK_Incomplete,
    /// \brief Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// \brief Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// \brief Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// \brief A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// \brief When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// \brief When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// \brief The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// \brief The arguments included an overloaded function name that could
    /// not be resolved to a suitable function.
    TDK_FailedOverloadResolution,
    /// \brief Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
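  ///
  /// For illustration, a failure case (example not taken from this header):
  /// \code
  ///   auto x = {1, 2.5}; // error: conflicting element types deduced for 'auto'
  /// \endcode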
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
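      /// For illustration (example not taken from this header):
      /// \code
      ///   template<typename T> void g(T);
      ///   g<int>(0); // explicit argument 'int' is substituted into 'g'
      /// \endcode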
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a ClassTemplatePartialSpecializationDecl or
      /// a FunctionTemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation
    } Kind;

    /// \brief The point of instantiation within the source code.
    SourceLocation PointOfInstantiation;

    /// \brief The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// \brief The entity that is being instantiated.
    Decl *Entity;

    /// \brief The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    /// \brief The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// \brief The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// \brief The source range that covers the construct that causes
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    ActiveTemplateInstantiation()
      : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
        TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

    /// \brief Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;

    friend bool operator==(const ActiveTemplateInstantiation &X,
                           const ActiveTemplateInstantiation &Y) {
      if (X.Kind != Y.Kind)
        return false;

      if (X.Entity != Y.Entity)
        return false;

      switch (X.Kind) {
      case TemplateInstantiation:
      case ExceptionSpecInstantiation:
        return true;

      case PriorTemplateArgumentSubstitution:
      case DefaultTemplateArgumentChecking:
        return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;

      case DefaultTemplateArgumentInstantiation:
      case ExplicitTemplateArgumentSubstitution:
      case DeducedTemplateArgumentSubstitution:
      case DefaultFunctionArgumentInstantiation:
        return X.TemplateArgs == Y.TemplateArgs;
      }

      llvm_unreachable("Invalid InstantiationKind!");
    }

    friend bool operator!=(const ActiveTemplateInstantiation &X,
                           const ActiveTemplateInstantiation &Y) {
      return !(X == Y);
    }
  };

  /// \brief List of active template instantiations.
  ///
  /// This vector is treated as a stack. As one template instantiation
  /// requires another template instantiation, additional
  /// instantiations are pushed onto the stack up to a
  /// user-configurable limit LangOptions::InstantiationDepth.
  SmallVector<ActiveTemplateInstantiation, 16>
    ActiveTemplateInstantiations;

  /// \brief Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;

  /// \brief Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// \brief Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// \brief Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// \brief The number of ActiveTemplateInstantiation entries in
  /// \c ActiveTemplateInstantiations that are not actual instantiations and,
  /// therefore, should not be counted as part of the instantiation depth.
  unsigned NonInstantiationEntries;

  /// \brief The last template from which a template instantiation
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant template
  /// instantiation backtraces when there are multiple errors in the
  /// same instantiation. FIXME: Does this belong in Sema? It's tough
  /// to implement it anywhere else.
  ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;

  /// \brief The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// \brief RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionRAII;

  /// \brief For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// \brief A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and evaluates to true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// \brief Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
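    ///
    /// A conventional usage sketch (assumed typical RAII pattern; not a
    /// verbatim excerpt from this header):
    /// \code
    ///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
    ///   if (Inst.isInvalid())
    ///     return; // instantiation depth exceeded; error already emitted
    /// \endcode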
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// \brief A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// \brief Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;

  /// \brief An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// \brief The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  class SavePendingInstantiationsAndVTableUsesRAII {
  public:
    SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    ~SavePendingInstantiationsAndVTableUsesRAII() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// \brief The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
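  ///
  /// For illustration, a local class of this kind (example not taken from
  /// this header):
  /// \code
  ///   template<typename T> void f() {
  ///     struct Local { T get() const { return T(); } };
  ///     Local().get(); // must be instantiated in f<T>'s own scope
  ///   }
  /// \endcode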
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
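  ///
  /// A minimal usage sketch (for illustration only; the caller context is
  /// assumed rather than taken from this header):
  /// \code
  ///   SmallVector<Expr *, 8> SubstitutedArgs;
  ///   if (SubstExprs(Args, NumArgs, /*IsCall=*/true, TemplateArgs,
  ///                  SubstitutedArgs))
  ///     return ExprError();
  /// \endcode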
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
      const MultiLevelTemplateArgumentList &TemplateArgs);

  void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                                  LateInstantiatedAttrVec *LateAttrs,
                                  DeclContext *Owner,
                                  LocalInstantiationScope *StartingScope,
                                  bool InstantiatingVarTemplate = false);

  void InstantiateVariableInitializer(
      VarDecl *Var, VarDecl *OldVar,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                     VarDecl *Var, bool Recursive = false,
                                     bool DefinitionRequired = false);
  void InstantiateStaticDataMemberDefinition(
      SourceLocation PointOfInstantiation, VarDecl *Var,
      bool Recursive = false, bool DefinitionRequired = false);

  void InstantiateMemInitializers(CXXConstructorDecl *New,
                                  const CXXConstructorDecl *Tmpl,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

  NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs);
  DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

  // Objective-C declarations.
  enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
  };
  ObjCContainerKind getObjCContainerKind() const;

  DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                                SourceLocation varianceLoc, unsigned index,
                                IdentifierInfo *paramName,
                                SourceLocation paramLoc,
                                SourceLocation colonLoc,
                                ParsedType typeBound);

  ObjCTypeParamList *actOnObjCTypeParamList(Scope *S,
                                            SourceLocation lAngleLoc,
                                            ArrayRef<Decl *> typeParams,
                                            SourceLocation rAngleLoc);

  void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

  Decl *ActOnStartClassInterface(Scope *S,
                                 SourceLocation AtInterfaceLoc,
                                 IdentifierInfo *ClassName,
                                 SourceLocation ClassLoc,
                                 ObjCTypeParamList *typeParamList,
                                 IdentifierInfo *SuperName,
                                 SourceLocation SuperLoc,
                                 ArrayRef<ParsedType> SuperTypeArgs,
                                 SourceRange SuperTypeArgsRange,
                                 Decl * const *ProtoRefs,
                                 unsigned NumProtoRefs,
                                 const SourceLocation *ProtoLocs,
                                 SourceLocation EndProtoLoc,
                                 AttributeList *AttrList);

  void ActOnSuperClassOfClassInterface(Scope *S,
                                       SourceLocation AtInterfaceLoc,
                                       ObjCInterfaceDecl *IDecl,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *SuperName,
                                       SourceLocation SuperLoc,
                                       ArrayRef<ParsedType> SuperTypeArgs,
                                       SourceRange SuperTypeArgsRange);

  void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc);

  Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                                IdentifierInfo *AliasName,
                                SourceLocation AliasLocation,
                                IdentifierInfo *ClassName,
                                SourceLocation ClassLocation);

  bool CheckForwardProtocolDeclarationForCircularDependency(
      IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
      const ObjCList<ObjCProtocolDecl> &PList);

  Decl *ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
                                    IdentifierInfo *ProtocolName,
                                    SourceLocation ProtocolLoc,
                                    Decl * const *ProtoRefNames,
                                    unsigned NumProtoRefs,
                                    const SourceLocation *ProtoLocs,
                                    SourceLocation EndProtoLoc,
                                    AttributeList *AttrList);

  Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    ObjCTypeParamList *typeParamList,
                                    IdentifierInfo *CategoryName,
                                    SourceLocation CategoryLoc,
                                    Decl * const *ProtoRefs,
                                    unsigned NumProtoRefs,
                                    const SourceLocation *ProtoLocs,
                                    SourceLocation EndProtoLoc);
  Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                      IdentifierInfo *ClassName,
                                      SourceLocation ClassLoc,
                                      IdentifierInfo *SuperClassname,
                                      SourceLocation SuperClassLoc);

  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc);

  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  DeclGroupPtrTy ActOnForwardClassDeclaration(
      SourceLocation Loc, IdentifierInfo **IdentList,
      SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
      unsigned NumElts);

  DeclGroupPtrTy ActOnForwardProtocolDeclaration(
      SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList,
      AttributeList *attrList);

  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               ArrayRef<IdentifierLocPair> ProtocolId,
                               SmallVectorImpl<Decl *> &Protocols);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
         Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
         ArrayRef<IdentifierInfo *> identifiers,
         ArrayRef<SourceLocation> identifierLocs,
         SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SourceLocation &protocolRAngleLoc,
         bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
               SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
               ArrayRef<SourceLocation> protocolLocs,
               SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
               Scope *S, SourceLocation Loc, ParsedType BaseType,
               SourceLocation TypeArgsLAngleLoc,
               ArrayRef<ParsedType> TypeArgs,
               SourceLocation TypeArgsRAngleLoc,
               SourceLocation ProtocolLAngleLoc,
               ArrayRef<Decl *> Protocols,
               ArrayRef<SourceLocation> ProtocolLocs,
               SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Check the application of the Objective-C '__kindof' qualifier to
  /// the given type.
  bool checkObjCKindOfType(QualType &type, SourceLocation loc);

  /// Ensure attributes are consistent with type.
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
  void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                   SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);

  /// Process the specified property declaration and create decls for the
  /// setters and getters as needed.
  /// \param property The property declaration being processed
  void ProcessPropertyDecl(ObjCPropertyDecl *property);

  void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                                ObjCPropertyDecl *SuperProperty,
                                const IdentifierInfo *Name,
                                bool OverridingProtocolProperty);

  void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                        ObjCInterfaceDecl *ID);

  Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                   ArrayRef<Decl *> allMethods = None,
                   ArrayRef<DeclGroupPtrTy> allTUVars = None);

  Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD, ObjCDeclSpec &ODS,
                      Selector GetterSel, Selector SetterSel,
                      bool *OverridingProperty,
                      tok::ObjCKeywordKind MethodImplKind,
                      DeclContext *lexicalDC = nullptr);

  Decl *ActOnPropertyImplDecl(Scope *S,
                              SourceLocation AtLoc,
                              SourceLocation PropertyLoc,
                              bool ImplKind,
                              IdentifierInfo *PropertyId,
                              IdentifierInfo *PropertyIvar,
                              SourceLocation PropertyIvarLoc);

  enum ObjCSpecialMethodKind {
    OSMK_None,
    OSMK_Alloc,
    OSMK_New,
    OSMK_Copy,
    OSMK_RetainingInit,
    OSMK_NonRetainingInit
  };

  struct ObjCArgInfo {
    IdentifierInfo *Name;
    SourceLocation NameLoc;
    // The Type is null if no type was specified, and the DeclSpec is invalid
    // in this case.
    ParsedType Type;
    ObjCDeclSpec DeclSpec;

    /// ArgAttrs - Attribute list for this argument.
    AttributeList *ArgAttrs;
  };

  Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType,
    ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo,
    DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
    AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

  ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                              const ObjCObjectPointerType *OPT,
                                              bool IsInstance);
  ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                           bool IsInstance);

  bool CheckARCMethodDecl(ObjCMethodDecl *method);
  bool inferObjCARCLifetime(ValueDecl *decl);

  ExprResult
  HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                            Expr *BaseExpr,
                            SourceLocation OpLoc,
                            DeclarationName MemberName,
                            SourceLocation MemberLoc,
                            SourceLocation SuperLoc, QualType SuperType,
                            bool Super);

  ExprResult
  ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                            IdentifierInfo &propertyName,
                            SourceLocation receiverNameLoc,
                            SourceLocation propertyNameLoc);

  ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

  /// \brief Describes the kind of message expression indicated by a message
  /// send that starts with an identifier.
  enum ObjCMessageKind {
    /// \brief The message is sent to 'super'.
    ObjCSuperMessage,
    /// \brief The message is an instance message.
    ObjCInstanceMessage,
    /// \brief The message is a class message, and the identifier is a type
    /// name.
    ObjCClassMessage
  };

  ObjCMessageKind getObjCMessageKind(Scope *S,
                                     IdentifierInfo *Name,
                                     SourceLocation NameLoc,
                                     bool IsSuper,
                                     bool HasTrailingDot,
                                     ParsedType &ReceiverType);

  ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                               Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args);

  ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                               QualType ReceiverType,
                               SourceLocation SuperLoc,
                               Selector Sel,
                               ObjCMethodDecl *Method,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args,
                               bool isImplicit = false);

  ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                       bool isSuperReceiver,
                                       SourceLocation Loc,
                                       Selector Sel,
                                       ObjCMethodDecl *Method,
                                       MultiExprArg Args);

  ExprResult ActOnClassMessage(Scope *S,
                               ParsedType Receiver,
                               Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args);

  ExprResult BuildInstanceMessage(Expr *Receiver,
                                  QualType ReceiverType,
                                  SourceLocation SuperLoc,
                                  Selector Sel,
                                  ObjCMethodDecl *Method,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc,
                                  MultiExprArg Args,
                                  bool isImplicit = false);

  ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                          QualType ReceiverType,
                                          SourceLocation Loc,
                                          Selector Sel,
                                          ObjCMethodDecl *Method,
                                          MultiExprArg Args);

  ExprResult ActOnInstanceMessage(Scope *S,
                                  Expr *Receiver,
                                  Selector Sel,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc,
                                  MultiExprArg Args);

  ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  TypeSourceInfo *TSInfo,
                                  Expr *SubExpr);

  ExprResult ActOnObjCBridgedCast(Scope *S,
                                  SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  ParsedType Type,
                                  SourceLocation RParenLoc,
                                  Expr *SubExpr);

  void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);

  void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

  bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                     CastKind &Kind);

  bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
                                        QualType DestType, QualType SrcType,
                                        ObjCInterfaceDecl *&RelatedClass,
                                        ObjCMethodDecl *&ClassMethod,
                                        ObjCMethodDecl *&InstanceMethod,
                                        TypedefNameDecl *&TDNDecl,
                                        bool CfToNs);

  bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
                                         QualType DestType, QualType SrcType,
                                         Expr *&SrcExpr);

  bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);

  bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

  /// \brief Check whether the given new method is a valid override of the
  /// given overridden method, and set any properties that should be inherited.
  void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                               const ObjCMethodDecl *Overridden);

  /// \brief Describes the compatibility of a result type with its method.
  enum ResultTypeCompatibilityKind {
    RTC_Compatible,
    RTC_Incompatible,
    RTC_Unknown
  };

  void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                                ObjCInterfaceDecl *CurrentClass,
                                ResultTypeCompatibilityKind RTC);

  enum PragmaOptionsAlignKind {
    POAK_Native,  // #pragma options align=native
    POAK_Natural, // #pragma options align=natural
    POAK_Packed,  // #pragma options align=packed
    POAK_Power,   // #pragma options align=power
    POAK_Mac68k,  // #pragma options align=mac68k
    POAK_Reset    // #pragma options align=reset
  };

  /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
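  ///
  /// For example (illustrative source the parser would hand to this hook):
  /// \code
  ///   #pragma options align=natural
  ///   struct S { char c; double d; };
  ///   #pragma options align=reset
  /// \endcode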
  void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                               SourceLocation PragmaLoc);

  enum PragmaPackKind {
    PPK_Default, // #pragma pack([n])
    PPK_Show,    // #pragma pack(show), only supported by MSVC.
    PPK_Push,    // #pragma pack(push, [identifier], [n])
    PPK_Pop      // #pragma pack(pop, [identifier], [n])
  };

  enum PragmaMSStructKind {
    PMSST_OFF, // #pragma ms_struct off
    PMSST_ON   // #pragma ms_struct on
  };

  enum PragmaMSCommentKind {
    PCK_Unknown,
    PCK_Linker,   // #pragma comment(linker, ...)
    PCK_Lib,      // #pragma comment(lib, ...)
    PCK_Compiler, // #pragma comment(compiler, ...)
    PCK_ExeStr,   // #pragma comment(exestr, ...)
    PCK_User      // #pragma comment(user, ...)
  };

  /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
  void ActOnPragmaPack(PragmaPackKind Kind,
                       IdentifierInfo *Name,
                       Expr *Alignment,
                       SourceLocation PragmaLoc,
                       SourceLocation LParenLoc,
                       SourceLocation RParenLoc);

  /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
  void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

  /// ActOnPragmaMSComment - Called on well formed
  /// \#pragma comment(kind, "arg").
  void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

  /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
  /// pointers_to_members(representation method[, general purpose
  /// representation]).
  void ActOnPragmaMSPointersToMembers(
      LangOptions::PragmaMSPointersToMembersKind Kind,
      SourceLocation PragmaLoc);

  /// \brief Called on well formed \#pragma vtordisp().
  void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind,
                             SourceLocation PragmaLoc,
                             MSVtorDispAttr::Mode Value);

  enum PragmaSectionKind {
    PSK_DataSeg,
    PSK_BSSSeg,
    PSK_ConstSeg,
    PSK_CodeSeg,
  };

  bool UnifySection(StringRef SectionName,
                    int SectionFlags,
                    DeclaratorDecl *TheDecl);
  bool UnifySection(StringRef SectionName,
                    int SectionFlags,
                    SourceLocation PragmaSectionLocation);

  /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
  void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                        PragmaMsStackAction Action,
                        llvm::StringRef StackSlotLabel,
                        StringLiteral *SegmentName,
                        llvm::StringRef PragmaName);

  /// \brief Called on well formed \#pragma section().
  void ActOnPragmaMSSection(SourceLocation PragmaLocation,
                            int SectionFlags, StringLiteral *SegmentName);

  /// \brief Called on well-formed \#pragma init_seg().
  void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                            StringLiteral *SegmentName);

  /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
  void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier,
                         Scope *curScope,
                         SourceLocation PragmaLoc);

  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                         SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
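  ///
  /// For example:
  /// \code
  ///   #pragma weak weak_symbol = real_symbol
  /// \endcode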
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT
  void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  /// \brief Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// \brief Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// \brief Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// \brief Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                            unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                         unsigned SpellingListIndex);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
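  ///
  /// For reference, this corresponds to the CUDA attribute spelling, e.g.
  /// (illustrative):
  /// \code
  ///   __global__ void __launch_bounds__(256, 2) kernel(float *data);
  /// \endcode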
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                           Expr *MinBlocks, unsigned SpellingListIndex);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines TS
  //
  ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E);

  ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E);
  ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
  StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E);

  void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);

  //===--------------------------------------------------------------------===//
  // OpenMP directives and clauses.
  //
private:
  void *VarDataSharingAttributesStack;
  /// \brief Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();
  ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                   OpenMPClauseKind CKind);

public:
  /// \brief Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction, etc.) in OpenMP
  /// constructs.
  bool IsOpenMPCapturedVar(VarDecl *VD);

  /// \brief Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

  /// \brief Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPTargetCapturedVar(VarDecl *VD, unsigned Level);

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// \brief Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);
  /// \brief Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// \brief End analysis of clauses.
  void EndOpenMPClause();
  /// \brief Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// \brief Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// \brief Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
                                     CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id);
  /// \brief Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// \brief End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
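  ///
  /// A sketch of the expected call sequence while handling a directive
  /// (illustrative only; not the exact parser code):
  /// \code
  ///   StartOpenMPDSABlock(OMPD_parallel, DirName, CurScope, Loc);
  ///   ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
  ///   // ... parse the associated statement into AStmt ...
  ///   StmtResult Res = ActOnOpenMPRegionEnd(AStmt, Clauses);
  ///   EndOpenMPDSABlock(Res.get());
  /// \endcode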
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);

  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
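  ///
  /// For example:
  /// \code
  ///   #pragma omp task firstprivate(a) if(n > 1024)
  ///   process(a);
  /// \endcode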
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target data' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// \brief Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// \brief Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'simdlen' clause.
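  ///
  /// For example:
  /// \code
  ///   #pragma omp simd simdlen(8)
  ///   for (int i = 0; i < N; ++i)
  ///     a[i] = b[i] + c[i];
  /// \endcode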
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                     unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, unsigned Argument, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ArgumentLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// \brief Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
                                       Expr *ChunkSize,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation KindLoc,
                                       SourceLocation CommaLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// \brief Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// \brief Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
      OpenMPMapClauseKind MapType, SourceLocation DepLinMapLoc);
  /// \brief Called on well-formed 'private' clause.
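  ///
  /// For example:
  /// \code
  ///   #pragma omp parallel private(x, y)
  ///   { /* each thread gets its own uninitialized x and y */ }
  /// \endcode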
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId);
  /// \brief Called on well-formed 'linear' clause.
  OMPClause *
  ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          OpenMPLinearClauseKind LinKind,
                          SourceLocation LinLoc, SourceLocation ColonLoc,
                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
                          SourceLocation DepLoc, SourceLocation ColonLoc,
                          ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'map' clause.
  OMPClause *ActOnOpenMPMapClause(
      OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType,
      SourceLocation MapLoc, SourceLocation ColonLoc,
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// \brief The kind of conversion being performed.
  enum CheckedConversionKind {
    /// \brief An implicit conversion.
    CCK_ImplicitConversion,
    /// \brief A C-style cast.
    CCK_CStyleCast,
    /// \brief A functional-style cast.
    CCK_FunctionalCast,
    /// \brief A cast other than a C-style cast.
    CCK_OtherCast
  };

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam,
                              ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
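  //
  // For example, in a call to a function declared as
  // 'int printf(const char *, ...)', a 'float' argument is promoted to
  // 'double' and a 'short' argument is promoted to 'int' before being passed.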
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                                SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS,
                                               CastKind &Kind,
                                               bool ConvertRHS = true);

  // CheckSingleAssignmentConstraints - Currently used by
  // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
  // this routine performs the default function/array conversions, if
  // ConvertRHS is true.
  AssignConvertType CheckSingleAssignmentConstraints(
      QualType LHSType, ExprResult &RHS, bool Diagnose = true,
      bool DiagnoseCFAudited = false, bool ConvertRHS = true);

  // \brief If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.
  AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                             ExprResult &RHS);

  bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

  bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit,
                                       ImplicitConversionSequence& ICS);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const ImplicitConversionSequence& ICS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK
                                          = CCK_ImplicitConversion);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence& SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);

  /// the following "Check" methods will return a valid/converted QualType
  /// or a null QualType (indicating an error diagnostic was issued).

  /// type checking binary operators (subroutines of CreateBuiltinBinOp).
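  ///
  /// A typical (illustrative) use from a binary-operator caller:
  /// \code
  ///   QualType ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add);
  ///   if (ResultTy.isNull())
  ///     return ExprError(); // a diagnostic has already been issued
  /// \endcode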
  QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                           ExprResult &RHS);
  QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    SourceLocation OpLoc, bool isIndirect);
  QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
    bool IsDivide);
  QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
  QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
    QualType* CompLHSTy = nullptr);
  QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    QualType* CompLHSTy = nullptr);
  QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
    bool IsCompAssign = false);
  QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
    bool isRelational);
  QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
  QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
  // CheckAssignmentOperands is used for both simple and compound assignment.
  // For simple assignment, pass both expressions and a null converted type.
  // For compound assignment, pass both expressions and the converted type.
  QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

  ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                     UnaryOperatorKind Opcode, Expr *Op);
  ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                         BinaryOperatorKind Opcode,
                                         Expr *LHS, Expr *RHS);
  ExprResult checkPseudoObjectRValue(Expr *E);
  Expr *recreateSyntacticForm(PseudoObjectExpr *E);

  QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
  QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
  QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                    bool *NonStandardCompositeType = nullptr);
  QualType FindCompositePointerType(SourceLocation Loc,
                                    ExprResult &E1, ExprResult &E2,
                                    bool *NonStandardCompositeType = nullptr) {
    Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
    QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                  NonStandardCompositeType);
    E1 = E1Tmp;
    E2 = E2Tmp;
    return Composite;
  }

  QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation QuestionLoc);

  bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                  SourceLocation QuestionLoc);

  void DiagnoseAlwaysNonNullPointer(Expr *E,
                                    Expr::NullPointerConstantKind NullType,
                                    bool IsEqual, SourceRange Range);

  /// type checking for vector binary operators.
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc, bool isRelational);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible_With_Added_Qualification - The two types are
    /// reference-compatible with added qualification, meaning that
    /// they are reference-compatible and the qualifiers on T1 (cv1)
    /// are greater than the qualifiers on T2 (cv2).
    Ref_Compatible_With_Added_Qualification,
    /// Ref_Compatible - The two types are reference-compatible and
    /// have equivalent qualifiers (cv1 == cv2).
    Ref_Compatible
  };

  ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                      QualType T1, QualType T2,
                                                      bool &DerivedToBase,
                                                      bool &ObjCConversion,
                                                      bool &ObjCLifetimeConversion);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// \brief Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// \brief Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                                Expr *result, QualType &paramType);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size, or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged };

  /// \brief Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds.
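  ///
  /// For example, under ARC the following conversion requires an explicit
  /// bridge annotation (illustrative):
  /// \code
  ///   CFStringRef cf = (__bridge CFStringRef)nsString;
  /// \endcode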
  ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                             QualType castType, Expr *&op,
                                             CheckedConversionKind CCK,
                                             bool DiagnoseCFAudited = false,
                                             BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage,
                                 SourceLocation lbrac, SourceLocation rbrac,
                                 SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// \brief Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);

  /// \brief If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);

  /// \brief Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
  void EmitRelatedResultTypeNoteForReturn(QualType destType);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return true iff there were any errors
  ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);

  ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
                                   Expr *SubExpr);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// \brief Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
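  ///
  /// An illustrative scenario (the exact call sites are not shown in this
  /// header): a wide case value converted to the width of a switch condition
  /// type would be truncated here, and the caller's \p DiagID diagnostic
  /// emitted if the value changed.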
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);

  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// \brief Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                                SourceRange SR) = 0;
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() { }
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns an invalid result on
  /// failure. Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns an invalid result on failure.
  /// Can optionally return whether the bit-field is of width 0.
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct,
                            Expr *BitWidth, bool *ZeroWidth = nullptr);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_LastResort, // Lowest priority. Only in effect if
                    // LangOpts.CUDADisableTargetCallChecks is true.
    CFP_Fallback,   // Low priority caller/callee combination.
    CFP_Best,       // Preferred caller/callee combination.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
                                SmallVectorImpl<FunctionDecl *> &Matches);
  void EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
                                SmallVectorImpl<DeclAccessPair> &Matches);
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object on
  ///        its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartImpl(CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinMSVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
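  // For reference, an illustrative example of the builtin checked by
  // SemaBuiltinShuffleVector (assumed, not from the original header):
  //   __builtin_shufflevector(a, b, 0, 4, 1, 5)
  // selects elements from the concatenation of its two vector operands, here
  // interleaving the low halves of two 4-element vectors.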
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
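  /// An illustrative case (assumed, not from the original comment): a 'break'
  /// inside a GNU statement expression placed in a loop's condition or
  /// increment, e.g.
  /// \code
  ///   for (;; ({ if (done) break; }))
  ///     ;
  /// \endcode
  /// may bind differently under GCC than under Clang, which is what this
  /// check reports.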
  void CheckBreakContinueBinding(Expr *E);

  /// \brief Check whether the receiver is a mutable ObjC container which
  /// attempts to add itself into the container.
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// \brief Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// \brief A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// \brief Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const Expr * const *ExprArgs);

  /// \brief The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// \brief Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  AvailabilityResult getCurContextAvailability() const;

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// \brief To be used for checking whether the number of arguments being
  /// passed to a function exceeds the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef 
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. 
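  /// (Illustrative note, not in the original header: this is typically an
  /// ASTReader supplying declarations from a precompiled header or module.)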
ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return isVisible(Old) || New->isExternallyVisible(); } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
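  /// A sketch of the expected wiring (assumed; the parser-side callback name
  /// below is illustrative only):
  /// \code
  ///   // In the parser, once both objects exist:
  ///   Actions.SetLateTemplateParser(LateTemplateParserCallback,
  ///                                 /*Cleanup=*/nullptr, this);
  /// \endcode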
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
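  /// Illustrative usage (assumed, not from the original header):
  /// \code
  ///   SynthesizedFunctionScope Scope(*this, MethodDecl); // any DeclContext
  ///   // ... build the function body; the pushed function scope and
  ///   // expression evaluation context are popped when Scope is destroyed.
  /// \endcode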
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
    }
    ~SynthesizedFunctionScope() {
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before being declared. This is rare; such an identifier
  /// may alias another identifier, declared or undeclared.
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*, AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// \brief Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*, 2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// \brief The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// \brief The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// \brief The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// \brief Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// \brief The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// \brief The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// \brief Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// \brief Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// \brief The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// \brief Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// \brief The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// \brief The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// \brief The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// \brief The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// \brief The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// \brief id<NSCopying> type.
  QualType QIDNSCopying;

  /// \brief Will hold the 'respondsToSelector:' selector.
  Selector RespondsToSelectorSel;

  /// \brief Counter for internal MS Asm label names.
  unsigned MSAsmLabelNameCounter;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields.
  bool AllowAbstractFieldReference;

  /// \brief Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum ExpressionEvaluationContext {
    /// \brief The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// \brief The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// \brief The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// \brief The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// \brief The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// \brief Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// \brief The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// \brief Whether the enclosing context needed a cleanup.
    bool ParentNeedsCleanups;

    /// \brief Whether we are in a decltype expression.
    bool IsDecltype;

    /// \brief The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// \brief The number of typos encountered during this expression
    /// evaluation context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

    /// \brief The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// \brief The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// \brief The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedButUsed - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves the list of suspicious delete-expressions that will be checked
  /// at the end of the translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S)
        : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }

  private:
    Sema &S;
    bool OldFPContractState : 1;
  };

  /// Records and restores the vtordisp state on entry/exit of C++ method body.
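  /// Illustrative usage (assumed, not from the original header):
  /// \code
  ///   VtorDispStackRAII SavedVtorDisp(*this, /*ShouldSaveAndRestore=*/true);
  ///   // ... parse the method body; the vtordisp mode stack is restored
  ///   // when SavedVtorDisp goes out of scope.
  /// \endcode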
  class VtorDispStackRAII {
  public:
    VtorDispStackRAII(Sema &S, bool ShouldSaveAndRestore)
        : S(S), ShouldSaveAndRestore(ShouldSaveAndRestore), OldVtorDispStack() {
      if (ShouldSaveAndRestore)
        OldVtorDispStack = S.VtorDispModeStack;
    }
    ~VtorDispStackRAII() {
      if (ShouldSaveAndRestore)
        S.VtorDispModeStack = OldVtorDispStack;
    }

  private:
    Sema &S;
    bool ShouldSaveAndRestore;
    SmallVector<MSVtorDispAttr::Mode, 2> OldVtorDispStack;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. 
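/// A minimal pairing sketch using the declarator entry points above (assumes
/// a non-null \c TInfo; the variable names are illustrative):
/// \code
///   TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
///   ParsedType PT = CreateParsedType(TInfo->getType(), TInfo);
/// \endcode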
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) 
{} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). 
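/// A minimal sketch of the default (unevaluated-operand) use:
/// \code
///   QualType DT = BuildDecltypeType(E, E->getExprLoc());
/// \endcode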
QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
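/// For example, \c NC_Type means the name denotes a type (retrievable via
/// \c NameClassification::getType()), while \c NC_Expression carries an
/// already-built expression.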
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. 
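/// A hypothetical call sketch (the parser-side variables are invented for
/// illustration):
/// \code
///   Sema::NameClassification NC =
///       Actions.ClassifyName(CurScope, SS, Name, NameLoc, NextTok,
///                            /*IsAddressOfOperand=*/false);
///   if (NC.getKind() == Sema::NC_Type)
///     ParsedType T = NC.getType();
/// \endcode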
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); 
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
                                            ParmVarDecl * const *End,
                                            QualType ReturnTy,
                                            NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
                            AttributeList *AttrList,
                            SourceLocation SemiLoc);

/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                             ModuleIdPath Path);

/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// \brief Check if module import may be found in the current context,
/// emit error if not.
void diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc);

/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument
};

/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). 
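/// A sketch of the bracketing protocol the parser follows (error paths
/// elided; ActOnTagFinishDefinition is declared below):
/// \code
///   ActOnTagStartDefinition(S, TagD);
///   // ... member declarations are parsed ...
///   ActOnTagFinishDefinition(S, TagD, RBraceLoc);
/// \endcode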
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool EnumUnderlyingIsImplicit, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. 
/// If we're currently in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// \brief Don't merge availability attributes at all.
  AMK_None,
  /// \brief Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// \brief Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// \brief Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Attribute merging methods. Each returns the merged attribute if a new
/// attribute was added, or null otherwise.
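/// A hedged usage sketch (assumes an existing attribute \c A from the old
/// declaration; the accessors on \c A live outside this header):
/// \code
///   if (SectionAttr *NewAttr =
///           mergeSectionAttr(New, A->getRange(), A->getName(),
///                            A->getSpellingListIndex()))
///     New->addAttr(NewAttr);
/// \endcode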
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                        IdentifierInfo *Platform,
                                        VersionTuple Introduced,
                                        VersionTuple Deprecated,
                                        VersionTuple Obsoleted,
                                        bool IsUnavailable,
                                        StringRef Message,
                                        AvailabilityMergeKind AMK,
                                        unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                       TypeVisibilityAttr::VisibilityType Vis,
                                            unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                    VisibilityAttr::VisibilityType Vis,
                                    unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                       unsigned AttrSpellingListIndex,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                        IdentifierInfo *Ident,
                                        unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                              unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                        unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
                                              IdentifierInfo *Ident,
                                              unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
                            unsigned AttrSpellingListIndex);

void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,

  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,

  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
                           FunctionDecl *New,
                           const LookupResult &OldDecls,
                           NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
/// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. 
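/// Subclasses provide the \c match() filter and the diagnostics; see
/// ICEConvertDiagnoser below for the integral/enumeration instance used with
/// PerformContextualImplicitConversion.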
class ContextualImplicitConverter {
public:
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// \brief Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// \brief Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); 
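// A hedged sketch of a typical candidate-set round trip; the pieces
// referenced here (OverloadCandidateSet construction, BestViableFunction,
// and the surrounding variables) live outside this header:
//
//   AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
//                        Args, Candidates);
//   OverloadCandidateSet::iterator Best;
//   if (Candidates.BestViableFunction(*this, Loc, Best) == OR_Success) {
//     // Best->Function is the selected overload.
//   } else {
//     // On failure, NoteOverloadCandidate() explains each candidate.
//   }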
/// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};

/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// \brief The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// \brief The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists.
  ForRedeclaration
};

/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// \brief The lookup resulted in an error.
  LOLR_Error,
  /// \brief The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// \brief The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
                                                 CXXSpecialMember SM,
                                                 bool ConstArg,
                                                 bool VolatileArg,
                                                 bool RValueThis,
                                                 bool ConstThis,
                                                 bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
  TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};

/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           std::unique_ptr<CorrectionCandidateCallback> CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overload sets. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter /// and its property has a backing ivar, returns this ivar; otherwise, returns /// NULL. It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter.
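///
/// The situation being diagnosed looks roughly like this (illustrative
/// Objective-C sketch, not part of the original header):
/// \code
///   @property (atomic) int value;
///   // Only one accessor is user-declared:
///   - (void)setValue:(int)v;   // setter is user-declared...
///   // ...while -value is left to be synthesized, so atomicity cannot
///   // be guaranteed across the pair; a warning is issued here.
/// \endcode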
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// \brief - Returns instance or factory methods in global method pool for /// given selector. If no such method or only one method found, function returns /// false; otherwise, it returns true bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// \brief - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance); /// \brief Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. 
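///
/// (Illustrative note, not in the original header: as with the
/// instance-method form above, this simply forwards to the private
/// AddMethodToGlobalPool with /*instance*/false.)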
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// \brief An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
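///
/// A typical usage sketch (illustrative only, not part of the original
/// header; 'SemaRef' is assumed to be the enclosing Sema instance):
/// \code
///   FunctionScopeRAII Scope(SemaRef);  // pops the scope on destruction
///   if (HandledExplicitly) {
///     SemaRef.PopFunctionScopeInfo();  // scope already popped here...
///     Scope.disable();                 // ...so the RAII must not pop again
///   }
/// \endcode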
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, unsigned &Offset, llvm::InlineAsmIdentifierInfo &Info, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); 
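/// The Objective-C construct handled here looks like this (illustrative
/// sketch; the operand is checked by the callback above and the statement
/// is built by the callback below):
/// \code
///   @synchronized (lockObject) {
///     // body statement
///   }
/// \endcode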
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded.
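///
/// A probing sketch (illustrative only): checking whether a variable could
/// be captured, without actually performing the capture or emitting
/// diagnostics, via the BuildAndDiagnose=false form:
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = SemaRef.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
///   // 'Invalid' is true if the variable cannot be captured.
/// \endcode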
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, 
SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
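//
// For example (illustrative): given 'obj.field' where 'obj' is of a class
// type that defines operator->, member access can be retried as
// 'obj->field' using the arguments preserved in this struct.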
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
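///
/// For example (illustrative AltiVec-style source):
/// \code
///   vector float vf = (vector float)(1.0f, 2.0f, 3.0f, 4.0f);
/// \endcode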
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred.
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
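///
/// For example (illustrative):
/// \code
///   struct Widget {
///     Widget(std::initializer_list<int>);        // initializer-list ctor
///     Widget(std::initializer_list<int>, int);   // not one: the extra
///                                                // parameter has no default
///   };
/// \endcode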
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. 
// Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. 
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior.
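// For example (illustrative only), a type-punning cast such as
//   'reinterpret_cast<int &>(f)' for a float lvalue 'f'
// is checked here against the strict-aliasing rules.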
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true if the capture failed, false on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
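/// For example (illustrative only; 'buf' is a hypothetical placement
/// argument):
/// \code
///   new int(5);          // UseGlobal = false, no placement args
///   ::new (buf) int(5);  // UseGlobal = true, one placement arg
/// \endcode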
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the expression trait support /// pseudo-functions.
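/// For example (illustrative only):
/// \code
///   __is_lvalue_expr(*p)  // an expression-trait pseudo-function
/// \endcode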
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
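/// For example (illustrative only), the Microsoft extension:
/// \code
///   struct D : B { void f() { __super::f(); } };
/// \endcode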
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc The location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
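/// For example (illustrative only), the 'apply<T1, T2>::' portion of
/// \code
///   typename MetaFun::template apply<T1, T2>::type
/// \endcode
/// is handled by this overload.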
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
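/// For example (illustrative only; 'compute' is a hypothetical function):
/// \code
///   auto l = [x = compute()] { return x; }; // 'x' is an init-capture
/// \endcode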
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. 
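// For example (illustrative only), adjacent literals parse as one:
//   @"Hello" @" world"  // a single concatenated ObjC string literal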
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); 
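/// For example (illustrative only; 'Base' is a hypothetical base class),
/// both parenthesized and braced mem-initializers arrive here:
/// \code
///   struct S : Base { int n; S() : Base(), n{0} {} };
/// \endcode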
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
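/// For example (illustrative only; 'Base' and 'Derived' are hypothetical):
/// \code
///   Base *p = new Derived(); // marks Derived's vtable as used
/// \endcode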
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind 
NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise.
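/// For example (illustrative only), checking the arguments of 'A<int>'
/// against
/// \code
///   template<typename T, int N = 4> struct A;
/// \endcode
/// converts 'int' for T and substitutes the default argument for N.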
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded The set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
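/// As a quick orientation (a minimal sketch, not drawn from any particular
/// caller), deducing \c auto in
/// \code
///   auto x = 42; // deduces 'int'
/// \endcode
/// succeeds, which DeduceAutoType would report as DAR_Succeeded.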
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() returns true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof.
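/// A minimal caller-side sketch (variable names illustrative, assuming the
/// enclosing code has a Sema reference and an entity to instantiate):
/// \code
///   InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return; // the depth overflow has already been diagnosed
/// \endcode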
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed.
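/// For illustration (property name assumed), a declaration such as
/// \code
///   @property (nonatomic, copy) NSString *name;
/// \endcode
/// gives rise to an implicit -name getter and, for a readwrite property,
/// a -setName: setter declaration.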
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
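/// For example, any of the spellings listed in PragmaOptionsAlignKind below,
/// such as:
/// \code
///   #pragma options align=mac68k
/// \endcode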
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); enum PragmaPackKind { PPK_Default, // #pragma pack([n]) PPK_Show, // #pragma pack(show), only supported by MSVC. PPK_Push, // #pragma pack(push, [identifier], [n]) PPK_Pop // #pragma pack(pop, [identifier], [n]) }; enum PragmaMSStructKind { PMSST_OFF, // #pragma ms_struct off PMSST_ON // #pragma ms_struct on }; enum PragmaMSCommentKind { PCK_Unknown, PCK_Linker, // #pragma comment(linker, ...) PCK_Lib, // #pragma comment(lib, ...) PCK_Compiler, // #pragma comment(compiler, ...) PCK_ExeStr, // #pragma comment(exestr, ...) PCK_User // #pragma comment(user, ...) }; /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - Called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// \brief Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// \brief Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// \brief Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch. void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
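/// For example (both identifiers illustrative):
/// \code
///   #pragma weak weak_sym = real_sym
/// \endcode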
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT. void ActOnPragmaFPContract(tok::OnOffSwitch OOS); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// \brief Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// \brief Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// \brief Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// \brief Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration.
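/// Typically reached from the CUDA launch_bounds attribute; a sketch with an
/// illustrative kernel name:
/// \code
///   __global__ void __launch_bounds__(256, 2) my_kernel();
/// \endcode
/// where 256 corresponds to \p MaxThreads and 2 to \p MinBlocks.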
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); //===--------------------------------------------------------------------===// // C++ Coroutines TS // ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E); ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind); public: /// \brief Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. bool IsOpenMPCapturedVar(VarDecl *VD); /// \brief Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level); /// \brief Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedVar(VarDecl *VD, unsigned Level); ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// \brief End analysis of clauses. void EndOpenMPClause(); /// \brief Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// \brief Check if the current region is an OpenMP loop region and if it is, /// mark the loop control variable used in \p Init for loop initialization as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '\#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// \brief End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region.
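/// A rough pairing sketch (arguments illustrative; the exact call sequence
/// is up to the parser): the region is opened before the associated
/// statement is processed and closed once it is available.
/// \code
///   ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
///   StmtResult Res = ActOnOpenMPRegionEnd(AStmt, Clauses);
/// \endcode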
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement.
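/// The kind of construct handled here (an illustrative example with a
/// hypothetical compute()):
/// \code
///   int x;
///   #pragma omp task shared(x) if(n > 20)
///   x = compute(n); // deferred task; 'x' is shared with the enclosing region
/// \endcode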
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause.
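/// An illustrative use of the 'ordered' clause together with an ordered
/// region (sketch):
/// \code
///   #pragma omp parallel for ordered
///   for (int i = 0; i < n; ++i) {
///   #pragma omp ordered
///     printf("%d\n", i); // ordered bodies run in iteration order
///   }
/// \endcode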
OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, SourceLocation DepLinMapLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. 
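/// An illustrative construct using this clause (sketch):
/// \code
///   int a = 42;
///   #pragma omp parallel firstprivate(a)
///   { a += 1; } // each thread mutates its own copy, initialized to 42
/// \endcode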
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
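/// For example (illustrative), initializing a double from an int wraps the
/// operand in an implicit cast node rather than mutating it:
/// \code
///   int i = 1;
///   double d = i; // AST: ImplicitCastExpr <IntegralToFloating> around 'i'
/// \endcode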
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion. enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collect argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8).
If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types which /// point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); // CheckSingleAssignmentConstraints - Currently used by // CheckAssignmentOperands and ActOnReturnStmt. Prior to type checking, // this routine performs the default function/array conversions, if ConvertRHS // is true. AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); /// \brief If the lhs type is a transparent union, check whether we /// can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// The following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// Type checking binary operators (subroutines of CreateBuiltinBinOp).
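/// For instance (illustrative), CheckAdditionOperands resolves pointer
/// arithmetic, while ill-formed operand pairs fall through to
/// InvalidOperands():
/// \code
///   int *p; long n;
///   p + n; // OK: result type 'int *'
///   p + p; // invalid operands to binary '+'; a null QualType is returned
/// \endcode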
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
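/// GCC-style vectors these routines operate on (illustrative sketch):
/// \code
///   typedef float v4f __attribute__((vector_size(16)));
///   v4f a, b;
///   v4f sum = a + b; // element-wise; checked by CheckVectorOperands
///   auto lt = a < b; // yields a signed integer vector (GetSignedVectorType)
/// \endcode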
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged }; /// \brief Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds.
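/// Typical ARC casts involved here (illustrative):
/// \code
///   NSString *ns = @"hi";
///   CFStringRef ok = (__bridge CFStringRef)ns; // explicit bridge: accepted
///   CFStringRef bad = (CFStringRef)ns;         // unbridged cast: diagnosed
/// \endcode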
ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. 
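/// One scenario believed to reach this helper (an illustrative sketch): a
/// case value that does not fit the switch condition's type:
/// \code
///   char c = getchar();
///   switch (c) { case 300: break; } // 300 overflows 'char'; warning emitted
/// \endcode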
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D); enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_LastResort, // Lowest priority. Only in effect if // LangOpts.CUDADisableTargetCallChecks is true. CFP_Fallback, // Low priority caller/callee combination CFP_Best, // Preferred caller/callee combination }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<FunctionDecl *> &Matches); void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<DeclAccessPair> &Matches); void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartImpl(CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinMSVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
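// An illustrative call to the builtin checked below (not from this header):
//   typedef int v4i __attribute__((vector_size(16)));
//   v4i a, b;
//   v4i r = __builtin_shufflevector(a, b, 0, 1, 4, 5); // a[0],a[1],b[0],b[1]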
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
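/// For example (an illustrative sketch), a 'break' inside a GNU statement
/// expression used as a loop condition:
/// \code
///   for (int i = 0; ({ if (stop(i)) break; i < n; }); ++i) { /* ... */ }
/// \endcode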
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expressions' types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// \brief To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
  SmallVector<Scope*, 2> CurrentSEHFinally;

  /// \brief Source location for newly created implicit MSInheritanceAttrs.
  SourceLocation ImplicitMSInheritanceAttrLoc;

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      Slot(llvm::StringRef StackSlotLabel, ValueType Value,
           SourceLocation PragmaLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation) {}
    };
    void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel, ValueType Value);
    explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {}
    SmallVector<Slot, 2> Stack;
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// \brief This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// \brief Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursion.
  bool IsBuildingRecoveryCallExpr;

  /// ExprNeedsCleanups - True if the current evaluation context
  /// requires cleanups to be run at its conclusion.
  bool ExprNeedsCleanups;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// \brief Store a list of either DeclRefExprs or MemberExprs
  /// that contain a reference to a variable (constant) that may or may not
  /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
  /// and discarded value conversions have been applied to all subexpressions
  /// of the enclosing full expression. This is cleared at the end of each
  /// full expression.
  llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;

  /// \brief Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  ///
  /// This array is never empty. Clients should ignore the first
  /// element, which is used to cache a single FunctionScopeInfo
  /// that's used to parse every top-level function.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;

  /// \brief Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// \brief Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// \brief Delete-expressions to be analyzed at the end of the translation
  /// unit.
  ///
  /// This list contains class members, and the locations of delete-expressions
  /// for which it could not be proven whether they mismatch the new-expression
  /// used in the initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations for which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// \brief Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// \brief All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// \brief The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// \brief All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// \brief All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedExceptionSpecChecks;

  /// \brief All the members seen during a class definition which were both
  /// explicitly defaulted and had explicitly-specified exception
  /// specifications, along with the function type containing their
  /// user-specified exception specification. Those exception specifications
  /// were overridden with the default specifications, but we still need to
  /// check whether they are compatible with the default specification, and
  /// we can't do that until the enclosing set of class definitions is
  /// complete.
  SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
    DelayedDefaultedMemberExceptionSpecs;

  typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
      LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
    }

    ~SynthesizedFunctionScope() {
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before being declared. Rare; may alias another
  /// identifier, declared or undeclared.
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before being declared. Used in Solaris system
  /// headers to define functions that occur in multiple standards to call the
  /// version in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// \brief Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to look up file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// \brief The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// \brief The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// \brief The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// \brief Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// \brief The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// \brief The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// \brief Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// \brief Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// \brief The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// \brief Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// \brief The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// \brief The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// \brief The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// \brief The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// \brief The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. 
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // arguments.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves the list of suspicious delete-expressions that will be checked
  /// at the end of the translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S)
        : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }

  private:
    Sema &S;
    bool OldFPContractState : 1;
  };

  /// Records and restores the vtordisp state on entry/exit of a C++ method
  /// body.
  class VtorDispStackRAII {
  public:
    VtorDispStackRAII(Sema &S, bool ShouldSaveAndRestore)
        : S(S), ShouldSaveAndRestore(ShouldSaveAndRestore), OldVtorDispStack() {
      if (ShouldSaveAndRestore)
        OldVtorDispStack = S.VtorDispModeStack;
    }
    ~VtorDispStackRAII() {
      if (ShouldSaveAndRestore)
        S.VtorDispModeStack = OldVtorDispStack;
    }

  private:
    Sema &S;
    bool ShouldSaveAndRestore;
    SmallVector<MSVtorDispAttr::Mode, 2> OldVtorDispStack;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, making the user-defined ~SemaDiagnosticBuilder a safe
    // no-op in that case.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) 
{} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). 
QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. 
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); 
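  // -------------------------------------------------------------------------
  // Illustrative sketch (not part of the original header): NameClassification
  // above is a small tagged union whose static factory functions pair each
  // NameClassificationKind with the single payload that kind carries, and a
  // parser client is expected to switch on getKind() before touching a
  // payload, roughly like this ('Actions' is assumed to be the Sema instance
  // and the surrounding parser state is elided):
  //
  //   Sema::NameClassification C = Actions.ClassifyName(
  //       S, SS, Name, NameLoc, Next, /*IsAddressOfOperand=*/false);
  //   switch (C.getKind()) {
  //   case Sema::NC_Type:       consumeType(C.getType());       break;
  //   case Sema::NC_Expression: consumeExpr(C.getExpression()); break;
  //   case Sema::NC_TypeTemplate:
  //   case Sema::NC_VarTemplate:
  //   case Sema::NC_FunctionTemplate:
  //     consumeTemplate(C.getTemplateName(), C.getTemplateNameKind());
  //     break;
  //   default: /* NC_Error, NC_Keyword, NC_NestedNameSpecifier, ... */ break;
  //   }
  //
  // consumeType/consumeExpr/consumeTemplate are hypothetical helpers used only
  // to show which accessor is valid for which kind; the asserts inside the
  // accessors fire if the kind does not match.
  // -------------------------------------------------------------------------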
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineMethodDef(CXXMethodDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// \brief Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
  void DiagnoseUnusedParameters(ParmVarDecl *const *Begin, ParmVarDecl *const *End);

  /// \brief Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl *const *Begin,
                                              ParmVarDecl *const *End,
                                              QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc);

  /// \brief Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc);

  /// \brief The parser has processed a module import declaration.
  ///
  /// \param AtLoc The location of the '@' symbol, if any.
  ///
  /// \param ImportLoc The location of the 'import' keyword.
  ///
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                               ModuleIdPath Path);

  /// \brief The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// \brief Check if module import may be found in the current context,
  /// emit error if not.
  void diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc);

  /// \brief Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument
  };

  /// \brief Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             bool NeedDefinition, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);
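  // Sketch of the module access path handed to ActOnModuleImport, assuming the
  // usual dotted import syntax; the module name below is hypothetical.
  //
  // \code
  //   @import std.vector;   // Path carries the components "std" and "vector",
  //                         // each paired with its source location.
  // \endcode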
  /// \brief Retrieve a suitable printing policy.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// \brief Retrieve a suitable printing policy.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation = false);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record, const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                    bool isDefinition, SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag:  'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration:  'friend struct foo;'
  };
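  // Illustrative user code for isAcceptableTagRedeclaration(): C++ permits
  // 'class' and 'struct' to be mixed across redeclarations of the same type,
  // but a union tag may not be redeclared as a non-union (and vice versa).
  //
  // \code
  //   class X;
  //   struct X { int n; };   // OK: class/struct mismatch is acceptable.
  //   union Y;
  //   // struct Y { };       // error: 'Y' was previously declared as a union.
  // \endcode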
  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
                 CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
                 AttributeList *Attr, AccessSpecifier AS,
                 SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists,
                 bool &OwnedDecl, bool &IsDependent,
                 SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag,
                 TypeResult UnderlyingType, bool IsTypeSpecifier,
                 SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec,
                                SourceLocation TagLoc, CXXScopeSpec &SS,
                                IdentifierInfo *Name, SourceLocation NameLoc,
                                AttributeList *Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                               const CXXScopeSpec &SS, IdentifierInfo *Name,
                               SourceLocation TagLoc, SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);

  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle, AccessSpecifier AS);
  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                                   Declarator &D, Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle, AccessSpecifier AS,
                                   AttributeList *MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo,
                            RecordDecl *Record, SourceLocation Loc, bool Mutable,
                            Expr *BitfieldWidth, InClassInitStyle InitStyle,
                            SourceLocation TSSL, AccessSpecifier AS,
                            NamedDecl *PrevDecl, Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

  void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                  Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, AttributeList *AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  typedef void *SkippedDefinitionContext;

  /// \brief Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// \brief Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy, bool EnumUnderlyingIsImplicit,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
                     SourceLocation RBraceLoc, Decl *EnumDecl,
                     ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope *S, Decl *D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null.
  /// If we're currently in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// \brief Make the given externally-produced declaration visible at the
  /// top level scope.
  ///
  /// \param D The externally-produced declaration to push.
  ///
  /// \param Name The name of the externally-produced declaration.
  void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
  /// true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in the
  /// enclosing namespace set of the context, rather than contained
  /// directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope.  Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// \brief Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// \brief Don't merge availability attributes at all.
    AMK_None,
    /// \brief Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,
    /// \brief Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,
    /// \brief Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };

  /// Attribute merging methods. Return true if a new attribute was added.
  AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                          IdentifierInfo *Platform,
                                          VersionTuple Introduced,
                                          VersionTuple Deprecated,
                                          VersionTuple Obsoleted, bool IsUnavailable,
                                          StringRef Message, AvailabilityMergeKind AMK,
                                          unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                              TypeVisibilityAttr::VisibilityType Vis,
                                              unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                                            unsigned AttrSpellingListIndex,
                                            MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format,
                              int FormatIdx, int FirstArg,
                              unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
                                                IdentifierInfo *Ident,
                                                unsigned AttrSpellingListIndex);
  CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
                              unsigned AttrSpellingListIndex);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S,
                                    bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };

  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,

    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,

    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };
  OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                             const LookupResult &OldDecls, NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
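  // How the three OverloadKind values arise, sketched in user code:
  //
  // \code
  //   void f(int);
  //   void f(double);   // Ovl_Overload: distinct signature.
  //   void f(int);      // Ovl_Match: redeclaration, not an overload.
  //   // int f;         // Ovl_NonFunction: lookup finds a non-function.
  // \endcode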
  /// \brief Checks availability of the function depending on the current
  /// function context. Inside an unavailable function, unavailability is
  /// ignored.
  ///
  /// \returns true if \p FD is unavailable and current context is inside
  /// an available function, false otherwise.
  bool isFunctionConsideredUnavailable(FunctionDecl *FD);

  ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType,
                                                   bool SuppressUserConversions,
                                                   bool AllowExplicit,
                                                   bool InOverloadResolution,
                                                   bool CStyle,
                                                   bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution, QualType &ConvertedType,
                           bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType, bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType &ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                  QualType FromType, QualType ToType);

  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                              CXXCastPath &BasePath, bool IgnoreBaseAccess);
  bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                                 bool InOverloadResolution, QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                                    CXXCastPath &BasePath, bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle,
                                 bool &ObjCLifetimeConversion);
  bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType, Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc, ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value, CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE);
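  // The CCEKind contexts correspond to the places where C++11 requires a
  // converted constant expression; an illustrative sketch in user code:
  //
  // \code
  //   template <int N> struct A {};
  //   enum class E : short { e = 1 };    // CCEK_Enumerator
  //   void h(int x) {
  //     A<2 + 2> a;                      // CCEK_TemplateArg
  //     switch (x) { case 4: break; }    // CCEK_CaseValue
  //     new int[3];                      // CCEK_NewExpr (array bound)
  //   }
  // \endcode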
  /// \brief Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
  class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// \brief Determine whether the specified type is a valid destination type
    /// for this conversion.
    virtual bool match(QualType T) = 0;

    /// \brief Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

    /// \brief Emits a diagnostic when the expression has incomplete class type.
    virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                                     QualType T) = 0;

    /// \brief Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                                       QualType T, QualType ConvTy) = 0;

    /// \brief Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                                   QualType ConvTy) = 0;

    /// \brief Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                                    QualType T) = 0;

    /// \brief Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                                QualType ConvTy) = 0;

    /// \brief Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                                     QualType T, QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                        bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                          QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// \brief Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                                 QualType T) = 0;
  };

  /// Perform a contextual implicit conversion.
  ExprResult PerformContextualImplicitConversion(SourceLocation Loc, Expr *FromE,
                                                 ContextualImplicitConverter &Converter);
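  // A sketch of the situation PerformContextualImplicitConversion handles: a
  // context (here, a switch condition) requires an integral value, and a class
  // type supplies it through a conversion function.
  //
  // \code
  //   struct Flag {
  //     operator int() const { return 1; }
  //   };
  //   void k(Flag f) {
  //     switch (f) {   // contextually converted via Flag::operator int()
  //     case 1:
  //       break;
  //     }
  //   }
  // \endcode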
  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl, NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = false);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet,
                             TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                             bool SuppressUserConversions = false,
                             bool PartialOverloading = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
  void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
                                    DeclAccessPair FoundDecl,
                                    TemplateArgumentListInfo *ExplicitTemplateArgs,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet,
                                    bool SuppressUserConversions = false,
                                    bool PartialOverloading = false);
  void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
                              CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
                              OverloadCandidateSet &CandidateSet,
                              bool AllowObjCConversionOnExplicit);
  void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
                                      DeclAccessPair FoundDecl,
                                      CXXRecordDecl *ActingContext, Expr *From,
                                      QualType ToType,
                                      OverloadCandidateSet &CandidateSet,
                                      bool AllowObjCConversionOnExplicit);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto, Expr *Object,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
                           ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet);
  void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc,
                                            ArrayRef<Expr *> Args,
                                            TemplateArgumentListInfo *ExplicitTemplateArgs,
                                            OverloadCandidateSet &CandidateSet,
                                            bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate
  void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType(),
                             bool TakingAddress = false);

  // Emit as a series of 'note's all template and non-templates identified by
  // the expression Expr
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                                 bool TakingAddress = false);
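  // Argument-dependent lookup, the case AddArgumentDependentLookupCandidates
  // serves, sketched in user code:
  //
  // \code
  //   namespace N {
  //     struct S {};
  //     void reset(S);
  //   }
  //   void use(N::S s) {
  //     reset(s);   // 'reset' is found in namespace N via ADL and added to the
  //                 // candidate set alongside any ordinary lookup results.
  //   }
  // \endcode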
  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  // [PossiblyAFunctionType]  -->   [Return]
  // NonFunctionType         -->   NonFunctionType
  // R (A)                   -->   R(A)
  // R (*)(A)                -->   R (A)
  // R (&)(A)                -->   R (A)
  // R (S::*)(A)             -->   R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                     bool Complain, DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
      bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);
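  // Target-typed resolution of an overload set's address, the job of
  // ResolveAddressOfOverloadedFunction, in user-code form:
  //
  // \code
  //   void g(int);
  //   void g(double);
  //   void (*p)(int) = &g;   // TargetType 'void (*)(int)' selects g(int).
  // \endcode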
  Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc, MultiExprArg Args,
                                     SourceLocation RParenLoc, Expr *ExecConfig,
                                     bool AllowTypoCorrection = true);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet, ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc,
                                     const UnresolvedSetImpl &Fns, Expr *input);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base, Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc, MultiExprArg Args,
                                       SourceLocation RParenLoc);
  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc, MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE,
                           FunctionDecl *FD);

  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
                                ParmVarDecl *const *ParamEnd,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria are specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// @brief Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,
    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,
    /// Label name lookup.
    LookupLabel,
    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,
    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,
    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,
    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,
    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations.  This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,
    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,
    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,
    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,
    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,
    /// \brief Look up any declaration with any name.
    LookupAnyName
  };

  /// \brief Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs. a reference).
  enum RedeclarationKind {
    /// \brief The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,
    /// \brief The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists.
    ForRedeclaration
  };

  /// \brief The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// \brief The lookup resulted in an error.
    LOLR_Error,
    /// \brief The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// \brief The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// \brief The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to be
    /// passed as a non-type template argument pack.
    LOLR_Template,
    /// \brief The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplate
  };

  SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
                                                   CXXSpecialMember SM,
                                                   bool ConstArg, bool VolatileArg,
                                                   bool RValueThis, bool ConstThis,
                                                   bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) LLVM_NOEXCEPT;
    TypoExprState &operator=(TypoExprState &&other) LLVM_NOEXCEPT;
  };

  /// \brief The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// \brief Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl *, bool> KnownNamespaces;

  /// \brief Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;
  /// \brief Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// \brief Look up a name, looking for a single declaration.  Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc,
                              LookupNameKind NameKind,
                              RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);
  void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
                                            DeclAccessPair Operator,
                                            QualType T1, QualType T2);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                    ArrayRef<QualType> ArgTys,
                                                    bool AllowRaw, bool AllowTemplate,
                                                    bool AllowStringTemplate);
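  // The LiteralOperatorLookupResult outcomes that LookupLiteralOperator can
  // report, sketched as the user-defined literal declarations that trigger
  // them (the operator names below are hypothetical):
  //
  // \code
  //   unsigned long long operator"" _kb(unsigned long long);  // LOLR_Cooked
  //   int operator"" _len(const char *);                      // LOLR_Raw
  //   template <char...> int operator"" _bits();              // LOLR_Template
  // \endcode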
  bool isKnownName(StringRef name);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non-error-recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };

  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               std::unique_ptr<CorrectionCandidateCallback> CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// \brief Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
  /// initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                                       llvm::function_ref<ExprResult(Expr *)> Filter =
                                           [](Expr *E) -> ExprResult { return E; });

  ExprResult CorrectDelayedTyposInExpr(Expr *E,
                                       llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                                       llvm::function_ref<ExprResult(Expr *)> Filter =
                                           [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult CorrectDelayedTyposInExpr(ExprResult ER,
                                       llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }
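  // A hedged usage sketch for the Filter parameter of
  // CorrectDelayedTyposInExpr: a caller can reject rebuilt expressions so that
  // the next combination of corrections is tried. 'SemaRef' and 'E' are
  // hypothetical locals of a caller, not names defined here.
  //
  // \code
  //   ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(
  //       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
  //         // Accept only rebuilt expressions of pointer type.
  //         if (Rebuilt->getType()->isPointerType())
  //           return Rebuilt;
  //         return ExprError();
  //       });
  // \endcode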
  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote, bool ErrorRecovery = true);

  void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                          ArrayRef<Expr *> Args,
                                          AssociatedNamespaceSet &AssociatedNamespaces,
                                          AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                                 bool ForRedeclaration, SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S);

  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const AttributeList *AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
  bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckNoReturnAttr(const AttributeList &attr);
  bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum,
                                      StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(CXXRecordDecl *RD, SourceRange Range,
                                          bool BestCase,
                                          MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly.  This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType &T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type.
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param nullabilityLoc The location of the nullability specifier.
  ///
  /// \param isContextSensitive Whether this nullability specifier was
  /// written as a context-sensitive keyword (in an Objective-C
  /// method) or an Objective-C property attribute, rather than as an
  /// underscored type specifier.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
                                     SourceLocation nullabilityLoc,
                                     bool isContextSensitive);
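  // The two spellings checkNullabilityTypeSpecifier distinguishes, in
  // illustrative Objective-C user code: the underscored type specifier and the
  // context-sensitive keyword form.
  //
  // \code
  //   @interface Widget : NSObject
  //   - (NSString * _Nullable)title;    // underscored type specifier
  //   - (nullable NSString *)subtitle;  // context-sensitive keyword
  //   @end
  // \endcode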
  /// \brief Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
  typedef llvm::DenseMap<Selector, ObjCMethodDecl *> ProtocolsMethodsMap;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is main routine to warn if any method
  /// remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCContainerDecl *IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl);
  void DefaultSynthesizeProperties(Scope *S, Decl *D);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method, ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property setter or
  /// getter and its property has a backing ivar, returns this ivar; otherwise,
  /// returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc,
                                                   SourceLocation LParenLoc,
                                                   FieldDeclarator &FD,
                                                   Selector GetterSel,
                                                   Selector SetterSel,
                                                   const bool isAssign,
                                                   const bool isReadWrite,
                                                   const unsigned Attributes,
                                                   const unsigned AttributesAsWritten,
                                                   bool *isOverridingProperty,
                                                   QualType T, TypeSourceInfo *TSI,
                                                   tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD, Selector GetterSel,
                                       Selector SetterSel, const bool isAssign,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T, TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
                                       ObjCInterfaceDecl *IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(const ObjCImplementationDecl *ImplD,
                                              const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen,
                                  ObjCImplDecl *IMPDecl, ObjCContainerDecl *IDecl,
                                  bool &IncompleteImpl, bool ImmediateClass,
                                  bool WarnCategoryMethodImpl = false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// \brief Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass, bool instance);

public:
  /// \brief - Returns instance or factory methods in the global method pool
  /// for the given selector. If no such method, or only one method, is found,
  /// the function returns false; otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                          SmallVectorImpl<ObjCMethodDecl *> &Methods,
                                          bool instance);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass);

  void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl *> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// \brief - Returns a selector which best matches the given argument list,
  /// or nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance);

  /// \brief Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/ true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/ false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass = false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/ true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass = false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/ false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                   QualType ObjectType = QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl *> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.

public:
  class FullExprArg {
  public:
    FullExprArg(Sema &actions) : E(nullptr) {}

    ExprResult release() { return E; }

    Expr *get() const { return E; }

    Expr *operator->() { return E; }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// \brief A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S) : S(S) {
      S.ActOnStartOfCompoundStmt();
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };
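  // A minimal usage sketch for CompoundScopeRAII (the surrounding caller is
  // hypothetical): the constructor/destructor pair brackets
  // ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt automatically.
  //
  // \code
  //   StmtResult parseBlock(Sema &SemaRef) {
  //     Sema::CompoundScopeRAII CompoundScope(SemaRef);
  //     // ... parse the statements of the compound statement ...
  //     return StmtResult();  // scope closes when CompoundScope is destroyed
  //   }
  // \endcode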
  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                           SourceLocation DotDotDotLoc, Expr *RHSVal,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr *> Attrs,
                                 Stmt *SubStmt);

  StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
                         Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
                                    Decl *CondVar);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                   Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc,
                         SourceLocation CondLParen, Expr *Cond,
                         SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                          Stmt *First, FullExprArg Second, Decl *SecondVar,
                          FullExprArg Third, SourceLocation RParenLoc, Stmt *Body);

  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First,
                                        Expr *collection, SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, unsigned &Offset, llvm::InlineAsmIdentifierInfo &Info, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); 
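// Usage sketch (illustrative only): the captured-region callbacks above
// follow a strict start/end bracketing. `S`, `Loc`, `CurScope`, and `Body`
// are assumed to come from the parser; CR_Default is the generic
// CapturedRegionKind enumerator.
StmtResult buildCapturedRegion(Sema &S, SourceLocation Loc, Scope *CurScope,
                               Stmt *Body) {
  S.ActOnCapturedRegionStart(Loc, CurScope, CR_Default, /*NumParams=*/1);
  if (!Body) {
    // On a parse failure the matching call is the error variant.
    S.ActOnCapturedRegionError();
    return StmtError();
  }
  return S.ActOnCapturedRegionEnd(Body);
}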
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef };
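// Usage sketch (illustrative only) for the capture entry points declared
// just below: `S`, `Var`, and `Loc` are assumed to come from the caller.
void noteVariableUse(Sema &S, VarDecl *Var, SourceLocation Loc) {
  if (!S.NeedToCaptureVariable(Var, Loc))
    return;
  // The convenience overload defaults to an implicit capture and lets
  // tryCaptureVariable emit any diagnostics itself; a 'true' result
  // means the variable could not be captured.
  if (S.tryCaptureVariable(Var, Loc))
    return;
  QualType RefTy = S.getCapturedDeclRefType(Var, Loc);
  (void)RefTy; // the type a reference to Var would have in this scope
}
/// \brief Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded.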
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, 
SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred.
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. 
// Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
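// Usage sketch (illustrative only): folding two invoked members into an
// ImplicitExceptionSpecification. `S`, `Loc`, and the two method decls are
// assumed inputs; per the ordering comment above, the computed spec only
// loosens as members are added.
FunctionProtoType::ExceptionSpecInfo
computeMemberSpec(Sema &S, SourceLocation Loc, CXXMethodDecl *BaseDtor,
                  CXXMethodDecl *FieldDtor) {
  Sema::ImplicitExceptionSpecification Spec(S);
  Spec.CalledDecl(Loc, BaseDtor);
  Spec.CalledDecl(Loc, FieldDtor);
  return Spec.getExceptionSpec();
}
/// \brief Evaluate the implicit exception specification for a defaulted /// special member function.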
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. 
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); };
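// Usage sketch (illustrative only): temporarily allowing 'this' while
// checking something outside a member function body. `S`, `ContextDecl`,
// and the const qualification are assumed inputs.
void checkWithThisAllowed(Sema &S, Decl *ContextDecl) {
  // Allow 'this' as if inside a const member of ContextDecl's class.
  Sema::CXXThisScopeRAII ThisScope(S, ContextDecl,
                                   /*CXXThisTypeQuals=*/Qualifiers::Const);
  QualType ThisTy = S.getCurrentThisType();
  (void)ThisTy; // still null if 'this' remains unavailable here
}
/// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.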
/// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true if the capture failed, false if it succeeded. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen);
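// For orientation (illustrative only), the source-level forms the trait
// callbacks above model, written as plain C++; the trait spellings are
// clang/GCC builtin extensions, so availability depends on the compiler.
struct Trivial { int x; };
static_assert(__is_trivially_copyable(Trivial), "type trait");   // ActOnTypeTrait
static_assert(__array_rank(int[2][3]) == 2, "array type trait"); // ActOnArrayTypeTrait
/// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions.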
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. 
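// Illustrative input (editor's sketch): adjacent literals such as @"Hello, " @"world" are collected into one expression, so AtLocs and Strings arrive as parallel arrays of NumStrings pieces.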
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. The type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or a C structure with the attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc);
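/// An illustrative pair of mem-initializer forms (editor's sketch, not from the original header): /// \code /// struct S { int x; S() : x(0) {} S(int) : x{1} {} }; /// \endcode /// The parenthesized form 'x(0)' reaches the ArrayRef<Expr*> overload above; the braced form 'x{1}' reaches the InitList overload below.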
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
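/// A minimal sketch of a vtable use (illustrative, not from the original header): /// \code /// struct B { virtual void f(); }; /// B b; /// \endcode /// Constructing 'b' uses B's vtable, so B is recorded in VTableUses and later handled here.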
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
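// Illustrative derived-to-base query (editor's sketch): given 'struct Base {}; struct Derived : Base {};', IsDerivedFrom returns true for the Derived/Base pair, and the CXXBasePaths overload also records the inheritance path consumed by the conversion checks below.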
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of the base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
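/// For example (illustrative, editor's sketch): /// \code /// template<typename T> struct X; /// template<typename T> using A = X<T>; /// template<typename T> void f(T); /// \endcode /// These parameter lists are checked in the TPC_ClassTemplate, TPC_TypeAliasTemplate, and TPC_FunctionTemplate contexts, respectively.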
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind 
NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise.
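/// For example (illustrative sketch): /// \code /// template<typename T, typename U = int> struct Pair {}; /// Pair<float> p; /// \endcode /// Here \p Converted would receive {float, int}: the defaulted argument for U is substituted and appended.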
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded The set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
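/// For a call such as (illustrative sketch, not from the original header): /// \code /// template<typename T> void f(T, T*); /// int i; f(i, &i); /// \endcode /// two OriginalCallArg records would be formed: (T, 0, int) and (T*, 1, int*).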
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
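/// For example (illustrative): /// \code /// auto x = 42; /// \endcode /// deduction succeeds (DAR_Succeeded) and \p Result is 'int'.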
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
ExplicitTemplateArgumentSubstitution, /// We are substituting a template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() will return \c true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof.
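/// A hedged usage sketch (editor's illustration, not from the original header): /// \code /// InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, ClassDecl); /// if (Inst.isInvalid()) return true; /// \endcode /// If the recursion limit was hit, the error has already been emitted; otherwise the destructor pops this record off ActiveTemplateInstantiations.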
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// \brief Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        ActiveTemplateInstantiation::InstantiationKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template, NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// \brief Determines whether we have exceeded the maximum
  /// recursive template instantiations.
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

class SavePendingInstantiationsAndVTableUsesRAII {
public:
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
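/// For example (an illustrative editor's sketch), the member \c g below can
/// only be instantiated inside the scope of \c f:
/// \code
///   template<typename T> void f(T t) {
///     struct Local { void g(T t) { /* may use function-local context */ } };
///     Local().g(t);
///   }
/// \endcode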
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
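/// A hypothetical call site (editor's sketch; \c CE and its accessors are
/// assumptions made for illustration only):
/// \code
///   SmallVector<Expr *, 8> ConvertedArgs;
///   if (SubstExprs(CE->getArgs(), CE->getNumArgs(), /*IsCall=*/true,
///                  TemplateArgs, ConvertedArgs))
///     return ExprError();
/// \endcode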
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                               IdentifierInfo *ClassName,
                               SourceLocation ClassLoc,
                               IdentifierInfo *SuperClassname,
                               SourceLocation SuperClassLoc);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

DeclGroupPtrTy ActOnForwardProtocolDeclaration(
    SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList,
    AttributeList *attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
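/// For instance (illustrative example, not from the original header), a
/// declaration such as
/// \code
///   @property (nonatomic) int value;
/// \endcode
/// implicitly yields a getter \c -value and a setter \c -setValue:.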
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
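/// For example, the receiver \c NSString in \c [NSString string]
/// (an illustrative editor's note).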
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
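/// For example (illustrative):
/// \code
///   #pragma options align=mac68k
/// \endcode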
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
                     Expr *Alignment, SourceLocation PragmaLoc,
                     SourceLocation LParenLoc, SourceLocation RParenLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
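/// For example (illustrative):
/// \code
///   #pragma weak weakfunc = realfunc
/// \endcode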
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
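/// For CUDA, such an attribute typically originates from source like the
/// following (illustrative sketch):
/// \code
///   __global__ void __launch_bounds__(256, 2) kern(float *p);
/// \endcode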
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E);

ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E);

void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                 OpenMPClauseKind CKind);

public:
/// \brief Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);

/// \brief Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

/// \brief Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedVar(VarDecl *VD, unsigned Level);

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                         const DeclarationNameInfo &DirName, Scope *CurScope,
                         SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                   const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                 ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
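/// The region callbacks are typically paired as follows (editor's sketch;
/// \c ParseStatement stands in for the parser's actual entry point):
/// \code
///   ActOnOpenMPRegionStart(DKind, CurScope);
///   StmtResult AStmt = ParseStatement();
///   StmtResult Dir = ActOnOpenMPRegionEnd(AStmt, Clauses);
/// \endcode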
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. 
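/// For example (illustrative):
/// \code
///   #pragma omp task firstprivate(i)
///   process(i);
/// \endcode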
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simdlen' clause. 
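/// For example (illustrative): '#pragma omp simd simdlen(8)'.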
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, SourceLocation DepLinMapLoc); /// \brief Called on well-formed 'private' clause. 
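/// For example (illustrative): '#pragma omp parallel private(a, b)'.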
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'map' clause. OMPClause *ActOnOpenMPMapClause( OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. 
CCK_OtherCast
};

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                 = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
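// For example (illustrative editor's note), passing a non-POD argument such
// as a std::string through a C variadic function is diagnosed and can be
// turned into a trap:
//   printf("%s", std::string("x"));   // non-POD passed through '...'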
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy.
This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); // CheckSingleAssignmentConstraints - Currently used by // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking, // this routine performs the default function/array conversions, if ConvertRHS // is true. AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // \brief If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged }; /// \brief Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds. 
ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. 
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D); enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_LastResort, // Lowest priority. Only in effect if // LangOpts.CUDADisableTargetCallChecks is true. CFP_Fallback, // Low priority caller/callee combination CFP_Best, // Preferred caller/callee combination }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<FunctionDecl *> &Matches); void EraseUnwantedCUDAMatches(const FunctionDecl *Caller, SmallVectorImpl<DeclAccessPair> &Matches); void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. 
/// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartImpl(CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinMSVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. 
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// \brief To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
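The TooManyArguments helper above hides one subtle case: during code completion in the middle of a call (PartialOverloading), the comma the user just typed implies that one more argument is coming, so the check compares NumArgs + 1 against NumParams. A minimal standalone restatement of that rule (the free function and its name here are illustrative, not part of Sema):

#include <cassert>
#include <cstddef>

// Same decision rule as the TooManyArguments member above: when completing
// after a comma with at least one argument already written, reserve a slot
// for the argument the user is about to type.
static bool tooManyArguments(std::size_t NumParams, std::size_t NumArgs,
                             bool PartialOverloading = false) {
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // the trailing comma adds an argument
  return NumArgs > NumParams;
}

int main() {
  assert(!tooManyArguments(2, 2));      // f(a, b) against two parameters: fine
  assert(tooManyArguments(2, 3));       // plainly too many
  assert(tooManyArguments(2, 2, true)); // "f(a, b, <cursor>" needs a third slot
  return 0;
}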
multiply.h
#include "CSR.h" #include <omp.h> #include <algorithm> template <bool sortOutput, typename IT> int MKLSpGEMM_symbolic(const CSR<IT,float> &A, const CSR<IT,float> &B, CSR<IT,float> &C) { int request = 1; int sort = 7; // don't sort anything int info = 0; // output info flag mkl_scsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, NULL, NULL, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT> void MKLSpGEMM_symbolic(const CSR<IT,double> &A, const CSR<IT,double> &B, CSR<IT,double> &C) { int request = 1; int sort = 7; // don't sort anything int info = 0; // output info flag mkl_dcsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, NULL, NULL, C.rowptr, NULL, &info); } template <bool sortOutput, typename IT> int MKLSpGEMM_numeric(const CSR<IT,float> &A, const CSR<IT,float> &B, CSR<IT,float> &C) { int request = 2; int sort = 7; int info = 0; // output info flag if (sortOutput) { sort = 8; // sort nonzeroes in rows of C, leave A and B alone (they are already sorted) } mkl_scsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, C.values, C.colids, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT> int MKLSpGEMM_numeric(const CSR<IT,double> &A, const CSR<IT,double> &B, CSR<IT,double> &C) { int request = 2; int sort = 7; int info = 0; // output info flag if (sortOutput) { sort = 8; // sort nonzeroes in rows of C, leave A and B alone (they are already sorted) } mkl_dcsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, C.values, C.colids, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT, typename NT> void MKLSpGEMM(const CSR<IT,NT> &A, const CSR<IT,NT> &B, CSR<IT,NT> &C) { // for request=1, mkl_dcsrmultcsr() computes only values of the array ic of length m + 1, // the memory for this array must be allocated beforehand. On exit the value // ic(m+1) - 1 is the actual number of the elements in the arrays c and jc int info; if (typeid(IT) != typeid(int)) { cout << "MKL does not support non-int type indices." << endl; return; } C.rows = A.rows; C.cols = B.cols; C.rowptr = my_malloc<IT>(C.rows + 1); C.zerobased = false; info = MKLSpGEMM_symbolic<sortOutput, IT>(A, B, C); if (info != 0) { cout << "MKL-Count Error: info returned " << info << endl; assert(info == 0); } C.nnz = C.rowptr[A.rows] - 1; C.colids = my_malloc<IT>(C.nnz); C.values = my_malloc<NT>(C.nnz); // for request=2, mkl_dcsrmultcsr() has been called previously with the parameter request=1, // the output arrays jc and c are allocated in the calling program and they are of the length ic(m+1) - 1 at least. 
info = MKLSpGEMM_numeric<sortOutput, IT>(A, B, C); if (info != 0) { printf("MKL-Calculation Error: info returned %d\n", info); assert(info == 0); } } template <typename IT, typename NT> long long int get_flop(const CSR<IT,NT> & A, const CSR<IT,NT> & B) { long long int flops = 0; // total flops (multiplication) needed to generate C long long int tflops=0; //thread private flops for (IT i=0; i < A.rows; ++i) { // for all rows of A long long int locmax = 0; for (IT j=A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column long long int inner = A.colids[j]; // get the row id of B (or column id of A) long long int npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nonzeros in A's corresponding column locmax += npins; } tflops += locmax; } flops += tflops; return (flops * 2); } template <typename IT, typename NT> long long int get_flop(const CSC<IT, NT> &A, const CSR<IT, NT> &B) { long long int flops = 0; #pragma omp parallel for reduction(+ \ : flops) for (IT i = 0; i < A.cols; ++i) { IT colnnz = A.colptr[i + 1] - A.colptr[i]; IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; flops += (colnnz * rownnz); } return (flops * 2); }
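The CSR overload of get_flop above counts, for every nonzero A(i,k), the number of nonzeros in row k of B, then doubles the total (presumably counting the multiply and the accompanying add per scalar product). A toy check of that formula on hand-built CSR index arrays; the 2x2 matrices are hypothetical, chosen only to make the count easy to verify:

#include <cassert>

int main() {
  // A = [1 2; 0 3] and B = [4 0; 5 6], both stored zero-based CSR.
  int A_rowptr[] = {0, 2, 3}; // row 0 of A holds 2 nonzeros, row 1 holds 1
  int A_colids[] = {0, 1, 1};
  int B_rowptr[] = {0, 1, 3}; // row 0 of B holds 1 nonzero, row 1 holds 2

  long long flops = 0;
  for (int i = 0; i < 2; ++i)
    for (int j = A_rowptr[i]; j < A_rowptr[i + 1]; ++j) {
      int inner = A_colids[j]; // the row of B hit by this nonzero of A
      flops += B_rowptr[inner + 1] - B_rowptr[inner];
    }
  assert(flops * 2 == 10); // 5 scalar products, counted as 2 flops each
  return 0;
}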
#include "CSR.h" #include <omp.h> #include <algorithm> template <bool sortOutput, typename IT> int MKLSpGEMM_symbolic(const CSR<IT,float> &A, const CSR<IT,float> &B, CSR<IT,float> &C) { int request = 1; int sort = 7; // don't sort anything int info = 0; // output info flag mkl_scsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, NULL, NULL, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT> void MKLSpGEMM_symbolic(const CSR<IT,double> &A, const CSR<IT,double> &B, CSR<IT,double> &C) { int request = 1; int sort = 7; // don't sort anything int info = 0; // output info flag mkl_dcsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, NULL, NULL, C.rowptr, NULL, &info); } template <bool sortOutput, typename IT> int MKLSpGEMM_numeric(const CSR<IT,float> &A, const CSR<IT,float> &B, CSR<IT,float> &C) { int request = 2; int sort = 7; int info = 0; // output info flag if (sortOutput) { sort = 8; // sort nonzeroes in rows of C, leave A and B alone (they are already sorted) } mkl_scsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, C.values, C.colids, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT> int MKLSpGEMM_numeric(const CSR<IT,double> &A, const CSR<IT,double> &B, CSR<IT,double> &C) { int request = 2; int sort = 7; int info = 0; // output info flag if (sortOutput) { sort = 8; // sort nonzeroes in rows of C, leave A and B alone (they are already sorted) } mkl_dcsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, C.values, C.colids, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT, typename NT> void MKLSpGEMM(const CSR<IT,NT> &A, const CSR<IT,NT> &B, CSR<IT,NT> &C) { // for request=1, mkl_dcsrmultcsr() computes only values of the array ic of length m + 1, // the memory for this array must be allocated beforehand. On exit the value // ic(m+1) - 1 is the actual number of the elements in the arrays c and jc int info; if (typeid(IT) != typeid(int)) { cout << "MKL does not support non-int type indices." << endl; return; } C.rows = A.rows; C.cols = B.cols; C.rowptr = my_malloc<IT>(C.rows + 1); C.zerobased = false; info = MKLSpGEMM_symbolic<sortOutput, IT>(A, B, C); if (info != 0) { cout << "MKL-Count Error: info returned " << info << endl; assert(info == 0); } C.nnz = C.rowptr[A.rows] - 1; C.colids = my_malloc<IT>(C.nnz); C.values = my_malloc<NT>(C.nnz); // for request=2, mkl_dcsrmultcsr() has been called previously with the parameter request=1, // the output arrays jc and c are allocated in the calling program and they are of the length ic(m+1) - 1 at least. 
info = MKLSpGEMM_numeric<sortOutput, IT>(A, B, C); if (info != 0) { printf("MKL-Calculation Error: info returned %d\n", info); assert(info == 0); } } template <typename IT, typename NT> long long int get_flop(const CSR<IT,NT> & A, const CSR<IT,NT> & B) { long long int flops = 0; // total flops (multiplication) needed to generate C long long int tflops=0; //thread private flops for (IT i=0; i < A.rows; ++i) { // for all rows of A long long int locmax = 0; for (IT j=A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column long long int inner = A.colids[j]; // get the row id of B (or column id of A) long long int npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nonzeros in A's corresponding column locmax += npins; } tflops += locmax; } flops += tflops; return (flops * 2); } template <typename IT, typename NT> long long int get_flop(const CSC<IT, NT> &A, const CSR<IT, NT> &B) { long long int flops = 0; for (IT i = 0; i < A.cols; ++i) { IT colnnz = A.colptr[i + 1] - A.colptr[i]; IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; flops += (colnnz * rownnz); } return (flops * 2); }
#include "CSR.h" #include <omp.h> #include <algorithm> template <bool sortOutput, typename IT> int MKLSpGEMM_symbolic(const CSR<IT,float> &A, const CSR<IT,float> &B, CSR<IT,float> &C) { int request = 1; int sort = 7; // don't sort anything int info = 0; // output info flag mkl_scsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, NULL, NULL, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT> void MKLSpGEMM_symbolic(const CSR<IT,double> &A, const CSR<IT,double> &B, CSR<IT,double> &C) { int request = 1; int sort = 7; // don't sort anything int info = 0; // output info flag mkl_dcsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, NULL, NULL, C.rowptr, NULL, &info); } template <bool sortOutput, typename IT> int MKLSpGEMM_numeric(const CSR<IT,float> &A, const CSR<IT,float> &B, CSR<IT,float> &C) { int request = 2; int sort = 7; int info = 0; // output info flag if (sortOutput) { sort = 8; // sort nonzeroes in rows of C, leave A and B alone (they are already sorted) } mkl_scsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, C.values, C.colids, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT> int MKLSpGEMM_numeric(const CSR<IT,double> &A, const CSR<IT,double> &B, CSR<IT,double> &C) { int request = 2; int sort = 7; int info = 0; // output info flag if (sortOutput) { sort = 8; // sort nonzeroes in rows of C, leave A and B alone (they are already sorted) } mkl_dcsrmultcsr((char*)"N", &request, &sort, &(A.rows), &(A.cols), &(B.cols), A.values, A.colids, A.rowptr, B.values, B.colids, B.rowptr, C.values, C.colids, C.rowptr, NULL, &info); return info; } template <bool sortOutput, typename IT, typename NT> void MKLSpGEMM(const CSR<IT,NT> &A, const CSR<IT,NT> &B, CSR<IT,NT> &C) { // for request=1, mkl_dcsrmultcsr() computes only values of the array ic of length m + 1, // the memory for this array must be allocated beforehand. On exit the value // ic(m+1) - 1 is the actual number of the elements in the arrays c and jc int info; if (typeid(IT) != typeid(int)) { cout << "MKL does not support non-int type indices." << endl; return; } C.rows = A.rows; C.cols = B.cols; C.rowptr = my_malloc<IT>(C.rows + 1); C.zerobased = false; info = MKLSpGEMM_symbolic<sortOutput, IT>(A, B, C); if (info != 0) { cout << "MKL-Count Error: info returned " << info << endl; assert(info == 0); } C.nnz = C.rowptr[A.rows] - 1; C.colids = my_malloc<IT>(C.nnz); C.values = my_malloc<NT>(C.nnz); // for request=2, mkl_dcsrmultcsr() has been called previously with the parameter request=1, // the output arrays jc and c are allocated in the calling program and they are of the length ic(m+1) - 1 at least. 
info = MKLSpGEMM_numeric<sortOutput, IT>(A, B, C); if (info != 0) { printf("MKL-Calculation Error: info returned %d\n", info); assert(info == 0); } } template <typename IT, typename NT> long long int get_flop(const CSR<IT,NT> & A, const CSR<IT,NT> & B) { long long int flops = 0; // total flops (multiplication) needed to generate C long long int tflops=0; //thread private flops for (IT i=0; i < A.rows; ++i) { // for all rows of A long long int locmax = 0; for (IT j=A.rowptr[i]; j < A.rowptr[i + 1]; ++j) { // For all the nonzeros of the ith column long long int inner = A.colids[j]; // get the row id of B (or column id of A) long long int npins = B.rowptr[inner + 1] - B.rowptr[inner]; // get the number of nonzeros in A's corresponding column locmax += npins; } tflops += locmax; } flops += tflops; return (flops * 2); } template <typename IT, typename NT> long long int get_flop(const CSC<IT, NT> &A, const CSR<IT, NT> &B) { long long int flops = 0; #pragma omp parallel for reduction(+ \ : flops) for (IT i = 0; i < A.cols; ++i) { IT colnnz = A.colptr[i + 1] - A.colptr[i]; IT rownnz = B.rowptr[i + 1] - B.rowptr[i]; flops += (colnnz * rownnz); } return (flops * 2); }
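The only difference between the two formatted variants of this file is the OpenMP pragma on the CSC-by-CSR get_flop loop: reduction(+ : flops) is what makes the concurrent += safe, giving each thread a private accumulator that is summed into the shared flops when the loop ends. A minimal self-contained sketch of the same clause:

#include <cassert>

int main() {
  long long flops = 0;
  // Each thread accumulates into a private copy of flops; OpenMP adds the
  // private copies into the shared variable at the end of the loop.
  #pragma omp parallel for reduction(+ : flops)
  for (int i = 0; i < 1000; ++i)
    flops += i;
  assert(flops == 499500); // matches the serial sum 0 + 1 + ... + 999
  return 0;
}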
GB_unop__ainv_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_fp32_fp32) // op(A') function: GB (_unop_tran__ainv_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = -aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = -z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = -z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_fp32_fp32) // op(A') function: GB (_unop_tran__ainv_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = -aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = -z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = -z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__ainv_fp32_fp32) // op(A') function: GB (_unop_tran__ainv_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = -aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = -z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__ainv_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = -z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = -z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__ainv_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
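The generated apply kernel above distinguishes exactly two layouts: a full array (Ab == NULL, every slot holds an entry) and a bitmap (only slots with Ab[p] nonzero hold entries, skipped via continue). A minimal sketch of those two loops for the AINV (negate) operator, using plain buffers rather than GraphBLAS matrices:

#include <cassert>
#include <cstdint>

int main() {
  float Ax[4] = {1.0f, -2.0f, 3.0f, 4.0f};
  float Cx[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  int8_t Ab[4] = {1, 0, 1, 1}; // bitmap: slot 1 holds no entry

  // full case (Ab == NULL in the kernel): negate every slot
  for (int p = 0; p < 4; p++) Cx[p] = -Ax[p];
  assert(Cx[1] == 2.0f);

  // bitmap case: skip empty slots, leaving Cx[p] untouched there
  for (int p = 0; p < 4; p++) {
    if (!Ab[p]) continue;
    Cx[p] = -Ax[p];
  }
  assert(Cx[0] == -1.0f && Cx[2] == -3.0f && Cx[3] == -4.0f);
  return 0;
}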
GB_binop__ge_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32) // A*D function (colscale): GB (_AxD__ge_fp32) // D*A function (rowscale): GB (_DxB__ge_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32) // C=scalar+B GB (_bind1st__ge_fp32) // C=scalar+B' GB (_bind1st_tran__ge_fp32) // C=A+scalar GB (_bind2nd__ge_fp32) // C=A'+scalar GB (_bind2nd_tran__ge_fp32) // C type: bool // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but 
A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
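The bind1st and bind2nd kernels above fix one operand of z = (x >= y) to a scalar and write a boolean result. A minimal standalone sketch of both loops (hypothetical demo_ names; the GBB-style bitmap test is written out by hand, and a NULL bitmap means all entries are present):

#include <stdint.h>
#include <stdbool.h>

// bind1st: Cx [p] = (x >= Bx [p]) ; Bb is an optional presence bitmap
static void demo_bind1st_ge_fp32 (bool *Cx, float x, const float *Bx,
    const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   // entry not present
        Cx [p] = (x >= Bx [p]) ;
    }
}

// bind2nd: Cx [p] = (Ax [p] >= y) ; Ab is an optional presence bitmap
static void demo_bind2nd_ge_fp32 (bool *Cx, const float *Ax, float y,
    const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // entry not present
        Cx [p] = (Ax [p] >= y) ;
    }
}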
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32) // A*D function (colscale): GB (_AxD__ge_fp32) // D*A function (rowscale): GB (_DxB__ge_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32) // C=scalar+B GB (_bind1st__ge_fp32) // C=scalar+B' GB (_bind1st_tran__ge_fp32) // C=A+scalar GB (_bind2nd__ge_fp32) // C=A'+scalar GB (_bind2nd_tran__ge_fp32) // C type: bool // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ge_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32) // A*D function (colscale): GB (_AxD__ge_fp32) // D*A function (rowscale): GB (_DxB__ge_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32) // C=scalar+B GB (_bind1st__ge_fp32) // C=scalar+B' GB (_bind1st_tran__ge_fp32) // C=A+scalar GB (_bind2nd__ge_fp32) // C=A'+scalar GB (_bind2nd_tran__ge_fp32) // C type: bool // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but 
A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp-axpygpu.c
//
//  omp-axpy.c
//
//
//  Created by Yaying Shi on 10/2/19.
//
/* Manual implementation of the following AXPY OpenMP offloading version using LLVM OpenMP runtime
#include "omp-axpy.h"
void axpy(int N, float *Y, float *X, float a) {
    int i;
#pragma omp target map(to:X[0:N]) map(tofrom:Y[0:N])
#pragma omp parallel for
    for (i = 0; i < N; ++i){
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n",X[i],Y[i]);
    }
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++){
        Y[i] = (((float)rand()/(float)(10)) * x);
        X[i] = (((float)rand()/(float)(10)) * x);
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    axpy(N,&Y[0],&X[0],a);
    return 0;
}
*/
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include "omp-axpycpu.h"
//extern void __kmpc_fork_call(ident_t *, kmp_int32, kmpc_micro, ...);
//extern void __kmpc_for_static_init_4(ident_t *, kmp_int32,kmp_int32,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32,kmp_int32);
//extern void __kmpc_for_static_fini(ident_t *, kmp_int32);
//extern void __kmpc_global_thread_num(ident_t *);
// typedefs so the bare struct names used below are valid C
typedef struct __tgt_offload_entry {
    void *addr;
    char *name;
    size_t size;
    int32_t flags;
    int32_t reserved;
} __tgt_offload_entry;
typedef struct __tgt_device_image {
    void *ImageStart;
    void *ImageEnd;
    __tgt_offload_entry *EntriesBegin;
    __tgt_offload_entry *EntriesEnd;
} __tgt_device_image;
typedef struct __tgt_bin_desc {
    int32_t NumDeviceImages;
    __tgt_device_image *DeviceImages;
    __tgt_offload_entry *HostEntriesBegin;
    __tgt_offload_entry *HostEntriesEnd;
} __tgt_bin_desc;
// bounds of the offload-entry table; a real offload link provides these symbols
__attribute__((visibility("hidden"))) extern __tgt_offload_entry __start_omp_offloading_entries[];
__attribute__((visibility("hidden"))) extern __tgt_offload_entry __stop_omp_offloading_entries[];
// device image payload (placeholder byte; a real build embeds the target binary here)
static const char Image0[] = { 0 };
static __tgt_device_image Images[] = {
    {
        (void *) Image0,                     /*ImageStart*/
        (void *) (Image0 + sizeof(Image0)),  /*ImageEnd*/
        __start_omp_offloading_entries,      /*EntriesBegin*/
        __stop_omp_offloading_entries        /*EntriesEnd*/
    }
};
static __tgt_bin_desc BinDesc = {
    sizeof(Images) / sizeof(Images[0]),      /*NumDeviceImages*/
    Images,                                  /*DeviceImages*/
    __start_omp_offloading_entries,          /*HostEntriesBegin*/
    __stop_omp_offloading_entries            /*HostEntriesEnd*/
};
void __kmp_axpy_microtask(int *gtid, int *btid, int N, float *a, float *Y, float *X){
    int last = 0, lower = 0, upper = N - 1, inc = 1;
    // shrink [lower, upper] to this thread's share of the iterations
    __kmpc_for_static_init_4(NULL, *gtid, 33, &last, &lower, &upper, &inc, 1, 1);
    for (int i = lower; i <= upper; ++i){
        Y[i] += (*a) * X[i];
        printf("this is a test: %f %f\n",X[i],Y[i]);
    }
    __kmpc_for_static_fini(NULL, *gtid);
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    //float x = 5.0;
    for (int i = 0; i < N; i++){
        //Y[i] = (((float)rand()/(float)(10)) * x);
        //X[i] = (((float)rand()/(float)(10)) * x);
        Y[i] = 1.0;
        X[i] = 1.0;
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    int gtid;
    __kmpc_begin(NULL, 0);
    gtid = __kmpc_global_thread_num(NULL);
    __kmpc_fork_call(NULL, 4, __kmp_axpy_microtask, N, &a, &Y[0], &X[0]);
    __kmpc_end(NULL);
    return 0;
}
//
//  omp-axpy.c
//
//
//  Created by Yaying Shi on 10/2/19.
//
/* Manual implementation of the following AXPY OpenMP offloading version using LLVM OpenMP runtime
#include "omp-axpy.h"
void axpy(int N, float *Y, float *X, float a) {
    int i;
    for (i = 0; i < N; ++i){
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n",X[i],Y[i]);
    }
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++){
        Y[i] = (((float)rand()/(float)(10)) * x);
        X[i] = (((float)rand()/(float)(10)) * x);
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    axpy(N,&Y[0],&X[0],a);
    return 0;
}
*/
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include "omp-axpycpu.h"
//extern void __kmpc_fork_call(ident_t *, kmp_int32, kmpc_micro, ...);
//extern void __kmpc_for_static_init_4(ident_t *, kmp_int32,kmp_int32,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32,kmp_int32);
//extern void __kmpc_for_static_fini(ident_t *, kmp_int32);
//extern void __kmpc_global_thread_num(ident_t *);
// typedefs so the bare struct names used below are valid C
typedef struct __tgt_offload_entry {
    void *addr;
    char *name;
    size_t size;
    int32_t flags;
    int32_t reserved;
} __tgt_offload_entry;
typedef struct __tgt_device_image {
    void *ImageStart;
    void *ImageEnd;
    __tgt_offload_entry *EntriesBegin;
    __tgt_offload_entry *EntriesEnd;
} __tgt_device_image;
typedef struct __tgt_bin_desc {
    int32_t NumDeviceImages;
    __tgt_device_image *DeviceImages;
    __tgt_offload_entry *HostEntriesBegin;
    __tgt_offload_entry *HostEntriesEnd;
} __tgt_bin_desc;
// bounds of the offload-entry table; a real offload link provides these symbols
__attribute__((visibility("hidden"))) extern __tgt_offload_entry __start_omp_offloading_entries[];
__attribute__((visibility("hidden"))) extern __tgt_offload_entry __stop_omp_offloading_entries[];
// device image payload (placeholder byte; a real build embeds the target binary here)
static const char Image0[] = { 0 };
static __tgt_device_image Images[] = {
    {
        (void *) Image0,                     /*ImageStart*/
        (void *) (Image0 + sizeof(Image0)),  /*ImageEnd*/
        __start_omp_offloading_entries,      /*EntriesBegin*/
        __stop_omp_offloading_entries        /*EntriesEnd*/
    }
};
static __tgt_bin_desc BinDesc = {
    sizeof(Images) / sizeof(Images[0]),      /*NumDeviceImages*/
    Images,                                  /*DeviceImages*/
    __start_omp_offloading_entries,          /*HostEntriesBegin*/
    __stop_omp_offloading_entries            /*HostEntriesEnd*/
};
void __kmp_axpy_microtask(int *gtid, int *btid, int N, float *a, float *Y, float *X){
    int last = 0, lower = 0, upper = N - 1, inc = 1;
    // shrink [lower, upper] to this thread's share of the iterations
    __kmpc_for_static_init_4(NULL, *gtid, 33, &last, &lower, &upper, &inc, 1, 1);
    for (int i = lower; i <= upper; ++i){
        Y[i] += (*a) * X[i];
        printf("this is a test: %f %f\n",X[i],Y[i]);
    }
    __kmpc_for_static_fini(NULL, *gtid);
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    //float x = 5.0;
    for (int i = 0; i < N; i++){
        //Y[i] = (((float)rand()/(float)(10)) * x);
        //X[i] = (((float)rand()/(float)(10)) * x);
        Y[i] = 1.0;
        X[i] = 1.0;
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    int gtid;
    __kmpc_begin(NULL, 0);
    gtid = __kmpc_global_thread_num(NULL);
    __kmpc_fork_call(NULL, 4, __kmp_axpy_microtask, N, &a, &Y[0], &X[0]);
    __kmpc_end(NULL);
    return 0;
}
//
//  omp-axpy.c
//
//
//  Created by Yaying Shi on 10/2/19.
//
/* Manual implementation of the following AXPY OpenMP offloading version using LLVM OpenMP runtime
#include "omp-axpy.h"
void axpy(int N, float *Y, float *X, float a) {
    int i;
#pragma omp target map(to:X[0:N]) map(tofrom:Y[0:N])
#pragma omp parallel for
    for (i = 0; i < N; ++i){
        Y[i] += a * X[i];
        printf("this is a test: %f %f\n",X[i],Y[i]);
    }
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    float x = 5.0;
    for (int i = 0; i < N; i++){
        Y[i] = (((float)rand()/(float)(10)) * x);
        X[i] = (((float)rand()/(float)(10)) * x);
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    axpy(N,&Y[0],&X[0],a);
    return 0;
}
*/
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include "omp-axpycpu.h"
//extern void __kmpc_fork_call(ident_t *, kmp_int32, kmpc_micro, ...);
//extern void __kmpc_for_static_init_4(ident_t *, kmp_int32,kmp_int32,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32 *,kmp_int32,kmp_int32);
//extern void __kmpc_for_static_fini(ident_t *, kmp_int32);
//extern void __kmpc_global_thread_num(ident_t *);
// typedefs so the bare struct names used below are valid C
typedef struct __tgt_offload_entry {
    void *addr;
    char *name;
    size_t size;
    int32_t flags;
    int32_t reserved;
} __tgt_offload_entry;
typedef struct __tgt_device_image {
    void *ImageStart;
    void *ImageEnd;
    __tgt_offload_entry *EntriesBegin;
    __tgt_offload_entry *EntriesEnd;
} __tgt_device_image;
typedef struct __tgt_bin_desc {
    int32_t NumDeviceImages;
    __tgt_device_image *DeviceImages;
    __tgt_offload_entry *HostEntriesBegin;
    __tgt_offload_entry *HostEntriesEnd;
} __tgt_bin_desc;
// bounds of the offload-entry table; a real offload link provides these symbols
__attribute__((visibility("hidden"))) extern __tgt_offload_entry __start_omp_offloading_entries[];
__attribute__((visibility("hidden"))) extern __tgt_offload_entry __stop_omp_offloading_entries[];
// device image payload (placeholder byte; a real build embeds the target binary here)
static const char Image0[] = { 0 };
static __tgt_device_image Images[] = {
    {
        (void *) Image0,                     /*ImageStart*/
        (void *) (Image0 + sizeof(Image0)),  /*ImageEnd*/
        __start_omp_offloading_entries,      /*EntriesBegin*/
        __stop_omp_offloading_entries        /*EntriesEnd*/
    }
};
static __tgt_bin_desc BinDesc = {
    sizeof(Images) / sizeof(Images[0]),      /*NumDeviceImages*/
    Images,                                  /*DeviceImages*/
    __start_omp_offloading_entries,          /*HostEntriesBegin*/
    __stop_omp_offloading_entries            /*HostEntriesEnd*/
};
void __kmp_axpy_microtask(int *gtid, int *btid, int N, float *a, float *Y, float *X){
    int last = 0, lower = 0, upper = N - 1, inc = 1;
    // shrink [lower, upper] to this thread's share of the iterations
    __kmpc_for_static_init_4(NULL, *gtid, 33, &last, &lower, &upper, &inc, 1, 1);
    for (int i = lower; i <= upper; ++i){
        Y[i] += (*a) * X[i];
        printf("this is a test: %f %f\n",X[i],Y[i]);
    }
    __kmpc_for_static_fini(NULL, *gtid);
}
int main(int argc, char*argv[]){
    int N = 100;
    float Y[N], X[N];
    //float x = 5.0;
    for (int i = 0; i < N; i++){
        //Y[i] = (((float)rand()/(float)(10)) * x);
        //X[i] = (((float)rand()/(float)(10)) * x);
        Y[i] = 1.0;
        X[i] = 1.0;
        printf("this is Y: %f\n",Y[i]);
    }
    float a = 0.5;
    int gtid;
    __kmpc_begin(NULL, 0);
    gtid = __kmpc_global_thread_num(NULL);
    __kmpc_fork_call(NULL, 4, __kmp_axpy_microtask, N, &a, &Y[0], &X[0]);
    __kmpc_end(NULL);
    return 0;
}
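The microtask above calls __kmpc_for_static_init_4 so that each thread's [lower, upper] covers only its share of the iteration space. The sketch below (hypothetical demo_ names, no LLVM runtime involved) reproduces the same contiguous block-partitioning arithmetic so the per-thread bounds can be inspected directly:

#include <stdio.h>

// Emulate the block partitioning of an OpenMP static schedule:
// split [0, n) into nthreads contiguous chunks, inclusive bounds.
static void demo_static_bounds (int tid, int nthreads, int n,
    int *lower, int *upper)
{
    int chunk = (n + nthreads - 1) / nthreads ;   // ceil(n / nthreads)
    *lower = tid * chunk ;
    *upper = *lower + chunk - 1 ;
    if (*upper >= n) *upper = n - 1 ;             // last chunk may be short
}

int main(void)
{
    int lower, upper ;
    for (int tid = 0 ; tid < 4 ; tid++)
    {
        demo_static_bounds (tid, 4, 100, &lower, &upper) ;
        printf ("thread %d: i = %d .. %d\n", tid, lower, upper) ;
    }
    return 0 ;
}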
omp_target_debug.c
// RUN: %libomptarget-compile-generic && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-generic 2>&1 | %fcheck-generic -allow-empty -check-prefix=DEBUG // RUN: %libomptarget-compile-generic && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-generic 2>&1 | %fcheck-generic -allow-empty -check-prefix=NDEBUG // REQUIRES: libomptarget-debug int main(void) { #pragma omp target {} return 0; } // DEBUG: Libomptarget // NDEBUG-NOT: Libomptarget // NDEBUG-NOT: Target
// RUN: %libomptarget-compile-generic && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-generic 2>&1 | %fcheck-generic -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-generic && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-generic 2>&1 | %fcheck-generic -allow-empty -check-prefix=NDEBUG
// REQUIRES: libomptarget-debug

int main(void) { return 0; }

// DEBUG: Libomptarget
// NDEBUG-NOT: Libomptarget
// NDEBUG-NOT: Target
// RUN: %libomptarget-compile-generic && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-generic 2>&1 | %fcheck-generic -allow-empty -check-prefix=DEBUG
// RUN: %libomptarget-compile-generic && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-generic 2>&1 | %fcheck-generic -allow-empty -check-prefix=NDEBUG
// REQUIRES: libomptarget-debug

int main(void) {
#pragma omp target
  {}
  return 0;
}

// DEBUG: Libomptarget
// NDEBUG-NOT: Libomptarget
// NDEBUG-NOT: Target
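The test above only checks whether libomptarget's debug banner appears around an empty target region. For eyeballing LIBOMPTARGET_DEBUG=1 output against a region that does observable work, a minimal standalone program such as the following can be used (a sketch, not part of the test suite; it falls back to the host when no device is available):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  int on_host = 1;
  // runs on the device if one is present; otherwise libomptarget
  // falls back to the initial (host) device
  #pragma omp target map(tofrom: on_host)
  {
    on_host = omp_is_initial_device();
  }
  printf("target region ran on the %s\n", on_host ? "host" : "device");
  return 0;
}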
GB_kroner.c
//------------------------------------------------------------------------------ // GB_kroner: Kronecker product, C = kron (A,B) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = kron(A,B) where op determines the binary multiplier to use. The type of // A and B are compatible with the x and y inputs of z=op(x,y), but can be // different. The type of C is the type of z. C is hypersparse if either A // or B are hypersparse. // FUTURE: this would be faster with built-in types and operators. // FUTURE: at most one thread is used for each vector of C=kron(A,B). The // matrix C is normally very large, but if both A and B are n-by-1, then C is // n^2-by-1 and only a single thread is used. A better method for this case // would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not // accounted for in the parallel load-balancing. #include "GB_kron.h" #include "GB_emult.h" #define GB_FREE_WORK \ { \ GB_phbix_free (A2) ; \ GB_phbix_free (B2) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORK ; \ GB_phbix_free (C) ; \ } GrB_Info GB_kroner // C = kron (A,B) ( GrB_Matrix C, // output matrix (static header) const bool C_is_csc, // desired format of C const GrB_BinaryOp op, // multiply operator const GrB_Matrix A_in, // input matrix bool A_is_pattern, // true if values of A are not used const GrB_Matrix B_in, // input matrix bool B_is_pattern, // true if values of B are not used GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; ASSERT (C != NULL && C->static_header) ; struct GB_Matrix_opaque A2_header, B2_header ; GrB_Matrix A2 = GB_clear_static_header (&A2_header) ; GrB_Matrix B2 = GB_clear_static_header (&B2_header) ; ASSERT_MATRIX_OK (A_in, "A_in for kron (A,B)", GB0) ; ASSERT_MATRIX_OK (B_in, "B_in for kron (A,B)", GB0) ; ASSERT_BINARYOP_OK (op, "op for kron (A,B)", GB0) ; //-------------------------------------------------------------------------- // finish any pending work //-------------------------------------------------------------------------- GB_MATRIX_WAIT (A_in) ; GB_MATRIX_WAIT (B_in) ; //-------------------------------------------------------------------------- // bitmap case: create sparse copies of A and B if they are bitmap //-------------------------------------------------------------------------- GrB_Matrix A = A_in ; if (GB_IS_BITMAP (A)) { GBURBLE ("A:") ; // set A2->iso = A->iso OK: no need for burble GB_OK (GB_dup_worker (&A2, A->iso, A, true, NULL, Context)) ; ASSERT_MATRIX_OK (A2, "dup A2 for kron (A,B)", GB0) ; GB_OK (GB_convert_bitmap_to_sparse (A2, Context)) ; ASSERT_MATRIX_OK (A2, "to sparse, A2 for kron (A,B)", GB0) ; A = A2 ; } GrB_Matrix B = B_in ; if (GB_IS_BITMAP (B)) { GBURBLE ("B:") ; // set B2->iso = B->iso OK: no need for burble GB_OK (GB_dup_worker (&B2, B->iso, B, true, NULL, Context)) ; ASSERT_MATRIX_OK (B2, "dup B2 for kron (A,B)", GB0) ; GB_OK (GB_convert_bitmap_to_sparse (B2, Context)) ; ASSERT_MATRIX_OK (B2, "to sparse, B2 for kron (A,B)", GB0) ; B = B2 ; } //-------------------------------------------------------------------------- // get inputs 
//-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const GB_void *restrict Ax = A_is_pattern ? NULL : ((GB_void *) A->x) ; const int64_t asize = A->type->size ; const int64_t avlen = A->vlen ; const int64_t avdim = A->vdim ; int64_t anvec = A->nvec ; int64_t anz = GB_nnz (A) ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int64_t *restrict Bi = B->i ; const GB_void *restrict Bx = B_is_pattern ? NULL : ((GB_void *) B->x) ; const int64_t bsize = B->type->size ; const int64_t bvlen = B->vlen ; const int64_t bvdim = B->vdim ; int64_t bnvec = B->nvec ; int64_t bnz = GB_nnz (B) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- double work = ((double) anz) * ((double) bnz) + (((double) anvec) * ((double) bnvec)) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (work, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // check if C is iso and compute its iso value if it is //-------------------------------------------------------------------------- GrB_Type ctype = op->ztype ; const size_t csize = ctype->size ; GB_void cscalar [GB_VLA(csize)] ; bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ; //-------------------------------------------------------------------------- // allocate the output matrix C //-------------------------------------------------------------------------- // C has the same type as z for the multiply operator, z=op(x,y) GrB_Index cvlen, cvdim, cnzmax, cnvec ; bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ; ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ; ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ; ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ; ASSERT (ok) ; if (C_iso) { // the values of A and B are no longer needed if C is iso GBURBLE ("(iso kron) ") ; A_is_pattern = true ; B_is_pattern = true ; } // C is hypersparse if either A or B are hypersparse. It is never bitmap. bool C_is_hyper = (cvdim > 1) && (Ah != NULL || Bh != NULL) ; bool C_is_full = GB_as_if_full (A) && GB_as_if_full (B) ; int sparsity = C_is_full ? GxB_FULL : ((C_is_hyper) ? 
GxB_HYPERSPARSE : GxB_SPARSE) ; // set C->iso = C_iso OK GB_OK (GB_new_bix (&C, true, // full, sparse, or hyper; static header ctype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, sparsity, true, B->hyper_switch, cnvec, cnzmax, true, C_iso, Context)) ; //-------------------------------------------------------------------------- // get C and the operator //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; int64_t *restrict Ci = C->i ; GB_void *restrict Cx = (GB_void *) C->x ; int64_t *restrict Cx_int64 = NULL ; int32_t *restrict Cx_int32 = NULL ; GxB_binary_function fmult = op->function ; GB_Opcode opcode = op->opcode ; bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ; GB_cast_function cast_A = NULL, cast_B = NULL ; if (!A_is_pattern) { cast_A = GB_cast_factory (op->xtype->code, A->type->code) ; } if (!B_is_pattern) { cast_B = GB_cast_factory (op->ytype->code, B->type->code) ; } int64_t offset = 0 ; if (op_is_positional) { offset = GB_positional_offset (opcode) ; Cx_int64 = (int64_t *) Cx ; Cx_int32 = (int32_t *) Cx ; } bool is64 = (ctype == GrB_INT64) ; //-------------------------------------------------------------------------- // compute the column counts of C, and C->h if C is hypersparse //-------------------------------------------------------------------------- int64_t kC ; if (!C_is_full) { #pragma omp parallel for num_threads(nthreads) schedule(guided) for (kC = 0 ; kC < cnvec ; kC++) { int64_t kA = kC / bnvec ; int64_t kB = kC % bnvec ; // get A(:,jA), the (kA)th vector of A int64_t jA = GBH (Ah, kA) ; int64_t aknz = (Ap == NULL) ? avlen : (Ap [kA+1] - Ap [kA]) ; // get B(:,jB), the (kB)th vector of B int64_t jB = GBH (Bh, kB) ; int64_t bknz = (Bp == NULL) ? 
bvlen : (Bp [kB+1] - Bp [kB]) ; // determine # entries in C(:,jC), the (kC)th vector of C // int64_t kC = kA * bnvec + kB ; if (!C_is_full) { Cp [kC] = aknz * bknz ; } if (C_is_hyper) { Ch [kC] = jA * bvdim + jB ; } } GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads, Context) ; if (C_is_hyper) C->nvec = cnvec ; } C->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // C = kron (A,B) where C is iso and full //-------------------------------------------------------------------------- if (C_iso) { // Cx [0] = cscalar = op (A,B) memcpy (C->x, cscalar, csize) ; if (C_is_full) { // no more work to do if C is iso and full ASSERT_MATRIX_OK (C, "C=kron(A,B), iso full", GB0) ; GB_FREE_WORK ; return (GrB_SUCCESS) ; } } //-------------------------------------------------------------------------- // C = kron (A,B) //-------------------------------------------------------------------------- const bool A_iso = A->iso ; const bool B_iso = B->iso ; #pragma omp parallel for num_threads(nthreads) schedule(guided) for (kC = 0 ; kC < cnvec ; kC++) { int64_t kA = kC / bnvec ; int64_t kB = kC % bnvec ; // get B(:,jB), the (kB)th vector of B int64_t jB = GBH (Bh, kB) ; int64_t pB_start = GBP (Bp, kB, bvlen) ; int64_t pB_end = GBP (Bp, kB+1, bvlen) ; int64_t bknz = pB_end - pB_start ; if (bknz == 0) continue ; GB_void bwork [GB_VLA(bsize)] ; if (!B_is_pattern && B_iso) { cast_B (bwork, Bx, bsize) ; } // get C(:,jC), the (kC)th vector of C // int64_t kC = kA * bnvec + kB ; int64_t pC = GBP (Cp, kC, cvlen) ; // get A(:,jA), the (kA)th vector of A int64_t jA = GBH (Ah, kA) ; int64_t pA_start = GBP (Ap, kA, avlen) ; int64_t pA_end = GBP (Ap, kA+1, avlen) ; GB_void awork [GB_VLA(asize)] ; if (!A_is_pattern && A_iso) { cast_A (awork, Ax, asize) ; } for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // awork = A(iA,jA), typecasted to op->xtype int64_t iA = GBI (Ai, pA, avlen) ; int64_t iAblock = iA * bvlen ; if (!A_is_pattern && !A_iso) { cast_A (awork, Ax + (pA*asize), asize) ; } for (int64_t pB = pB_start ; pB < pB_end ; pB++) { // bwork = B(iB,jB), typecasted to op->ytype int64_t iB = GBI (Bi, pB, bvlen) ; if (!B_is_pattern && !B_iso) { cast_B (bwork, Bx +(pB*bsize), bsize) ; } // C(iC,jC) = A(iA,jA) * B(iB,jB) if (!C_is_full) { int64_t iC = iAblock + iB ; Ci [pC] = iC ; } if (op_is_positional) { // positional binary operator switch (opcode) { case GB_FIRSTI_opcode : // z = first_i(A(iA,jA),y) == iA case GB_FIRSTI1_opcode : // z = first_i1(A(iA,jA),y) == iA+1 if (is64) { Cx_int64 [pC] = iA + offset ; } else { Cx_int32 [pC] = (int32_t) (iA + offset) ; } break ; case GB_FIRSTJ_opcode : // z = first_j(A(iA,jA),y) == jA case GB_FIRSTJ1_opcode : // z = first_j1(A(iA,jA),y) == jA+1 if (is64) { Cx_int64 [pC] = jA + offset ; } else { Cx_int32 [pC] = (int32_t) (jA + offset) ; } break ; case GB_SECONDI_opcode : // z = second_i(x,B(iB,jB)) == iB case GB_SECONDI1_opcode : // z = second_i1(x,B(iB,jB)) == iB+1 if (is64) { Cx_int64 [pC] = iB + offset ; } else { Cx_int32 [pC] = (int32_t) (iB + offset) ; } break ; case GB_SECONDJ_opcode : // z = second_j(x,B(iB,jB)) == jB case GB_SECONDJ1_opcode : // z = second_j1(x,B(iB,jB)) == jB+1 if (is64) { Cx_int64 [pC] = jB + offset ; } else { Cx_int32 [pC] = (int32_t) (jB + offset) ; } break ; default: ; } } else if (!C_iso) { // standard binary operator fmult (Cx +(pC*csize), awork, bwork) ; } pC++ ; } } } //-------------------------------------------------------------------------- // remove empty vectors from C, if hypersparse 
//-------------------------------------------------------------------------- GB_OK (GB_hypermatrix_prune (C, Context)) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C=kron(A,B)", GB0) ; GB_FREE_WORK ; return (GrB_SUCCESS) ; }
//------------------------------------------------------------------------------ // GB_kroner: Kronecker product, C = kron (A,B) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = kron(A,B) where op determines the binary multiplier to use. The type of // A and B are compatible with the x and y inputs of z=op(x,y), but can be // different. The type of C is the type of z. C is hypersparse if either A // or B are hypersparse. // FUTURE: this would be faster with built-in types and operators. // FUTURE: at most one thread is used for each vector of C=kron(A,B). The // matrix C is normally very large, but if both A and B are n-by-1, then C is // n^2-by-1 and only a single thread is used. A better method for this case // would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not // accounted for in the parallel load-balancing. #include "GB_kron.h" #include "GB_emult.h" #define GB_FREE_WORK \ { \ GB_phbix_free (A2) ; \ GB_phbix_free (B2) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORK ; \ GB_phbix_free (C) ; \ } GrB_Info GB_kroner // C = kron (A,B) ( GrB_Matrix C, // output matrix (static header) const bool C_is_csc, // desired format of C const GrB_BinaryOp op, // multiply operator const GrB_Matrix A_in, // input matrix bool A_is_pattern, // true if values of A are not used const GrB_Matrix B_in, // input matrix bool B_is_pattern, // true if values of B are not used GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; ASSERT (C != NULL && C->static_header) ; struct GB_Matrix_opaque A2_header, B2_header ; GrB_Matrix A2 = GB_clear_static_header (&A2_header) ; GrB_Matrix B2 = GB_clear_static_header (&B2_header) ; ASSERT_MATRIX_OK (A_in, "A_in for kron (A,B)", GB0) ; ASSERT_MATRIX_OK (B_in, "B_in for kron (A,B)", GB0) ; ASSERT_BINARYOP_OK (op, "op for kron (A,B)", GB0) ; //-------------------------------------------------------------------------- // finish any pending work //-------------------------------------------------------------------------- GB_MATRIX_WAIT (A_in) ; GB_MATRIX_WAIT (B_in) ; //-------------------------------------------------------------------------- // bitmap case: create sparse copies of A and B if they are bitmap //-------------------------------------------------------------------------- GrB_Matrix A = A_in ; if (GB_IS_BITMAP (A)) { GBURBLE ("A:") ; // set A2->iso = A->iso OK: no need for burble GB_OK (GB_dup_worker (&A2, A->iso, A, true, NULL, Context)) ; ASSERT_MATRIX_OK (A2, "dup A2 for kron (A,B)", GB0) ; GB_OK (GB_convert_bitmap_to_sparse (A2, Context)) ; ASSERT_MATRIX_OK (A2, "to sparse, A2 for kron (A,B)", GB0) ; A = A2 ; } GrB_Matrix B = B_in ; if (GB_IS_BITMAP (B)) { GBURBLE ("B:") ; // set B2->iso = B->iso OK: no need for burble GB_OK (GB_dup_worker (&B2, B->iso, B, true, NULL, Context)) ; ASSERT_MATRIX_OK (B2, "dup B2 for kron (A,B)", GB0) ; GB_OK (GB_convert_bitmap_to_sparse (B2, Context)) ; ASSERT_MATRIX_OK (B2, "to sparse, A2 for kron (A,B)", GB0) ; B = B2 ; } //-------------------------------------------------------------------------- // get inputs 
//-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const GB_void *restrict Ax = A_is_pattern ? NULL : ((GB_void *) A->x) ; const int64_t asize = A->type->size ; const int64_t avlen = A->vlen ; const int64_t avdim = A->vdim ; int64_t anvec = A->nvec ; int64_t anz = GB_nnz (A) ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int64_t *restrict Bi = B->i ; const GB_void *restrict Bx = B_is_pattern ? NULL : ((GB_void *) B->x) ; const int64_t bsize = B->type->size ; const int64_t bvlen = B->vlen ; const int64_t bvdim = B->vdim ; int64_t bnvec = B->nvec ; int64_t bnz = GB_nnz (B) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- double work = ((double) anz) * ((double) bnz) + (((double) anvec) * ((double) bnvec)) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (work, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // check if C is iso and compute its iso value if it is //-------------------------------------------------------------------------- GrB_Type ctype = op->ztype ; const size_t csize = ctype->size ; GB_void cscalar [GB_VLA(csize)] ; bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ; //-------------------------------------------------------------------------- // allocate the output matrix C //-------------------------------------------------------------------------- // C has the same type as z for the multiply operator, z=op(x,y) GrB_Index cvlen, cvdim, cnzmax, cnvec ; bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ; ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ; ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ; ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ; ASSERT (ok) ; if (C_iso) { // the values of A and B are no longer needed if C is iso GBURBLE ("(iso kron) ") ; A_is_pattern = true ; B_is_pattern = true ; } // C is hypersparse if either A or B are hypersparse. It is never bitmap. bool C_is_hyper = (cvdim > 1) && (Ah != NULL || Bh != NULL) ; bool C_is_full = GB_as_if_full (A) && GB_as_if_full (B) ; int sparsity = C_is_full ? GxB_FULL : ((C_is_hyper) ? 
GxB_HYPERSPARSE : GxB_SPARSE) ; // set C->iso = C_iso OK GB_OK (GB_new_bix (&C, true, // full, sparse, or hyper; static header ctype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, sparsity, true, B->hyper_switch, cnvec, cnzmax, true, C_iso, Context)) ; //-------------------------------------------------------------------------- // get C and the operator //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; int64_t *restrict Ci = C->i ; GB_void *restrict Cx = (GB_void *) C->x ; int64_t *restrict Cx_int64 = NULL ; int32_t *restrict Cx_int32 = NULL ; GxB_binary_function fmult = op->function ; GB_Opcode opcode = op->opcode ; bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ; GB_cast_function cast_A = NULL, cast_B = NULL ; if (!A_is_pattern) { cast_A = GB_cast_factory (op->xtype->code, A->type->code) ; } if (!B_is_pattern) { cast_B = GB_cast_factory (op->ytype->code, B->type->code) ; } int64_t offset = 0 ; if (op_is_positional) { offset = GB_positional_offset (opcode) ; Cx_int64 = (int64_t *) Cx ; Cx_int32 = (int32_t *) Cx ; } bool is64 = (ctype == GrB_INT64) ; //-------------------------------------------------------------------------- // compute the column counts of C, and C->h if C is hypersparse //-------------------------------------------------------------------------- int64_t kC ; if (!C_is_full) { for (kC = 0 ; kC < cnvec ; kC++) { int64_t kA = kC / bnvec ; int64_t kB = kC % bnvec ; // get A(:,jA), the (kA)th vector of A int64_t jA = GBH (Ah, kA) ; int64_t aknz = (Ap == NULL) ? avlen : (Ap [kA+1] - Ap [kA]) ; // get B(:,jB), the (kB)th vector of B int64_t jB = GBH (Bh, kB) ; int64_t bknz = (Bp == NULL) ? bvlen : (Bp [kB+1] - Bp [kB]) ; // determine # entries in C(:,jC), the (kC)th vector of C // int64_t kC = kA * bnvec + kB ; if (!C_is_full) { Cp [kC] = aknz * bknz ; } if (C_is_hyper) { Ch [kC] = jA * bvdim + jB ; } } GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads, Context) ; if (C_is_hyper) C->nvec = cnvec ; } C->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // C = kron (A,B) where C is iso and full //-------------------------------------------------------------------------- if (C_iso) { // Cx [0] = cscalar = op (A,B) memcpy (C->x, cscalar, csize) ; if (C_is_full) { // no more work to do if C is iso and full ASSERT_MATRIX_OK (C, "C=kron(A,B), iso full", GB0) ; GB_FREE_WORK ; return (GrB_SUCCESS) ; } } //-------------------------------------------------------------------------- // C = kron (A,B) //-------------------------------------------------------------------------- const bool A_iso = A->iso ; const bool B_iso = B->iso ; for (kC = 0 ; kC < cnvec ; kC++) { int64_t kA = kC / bnvec ; int64_t kB = kC % bnvec ; // get B(:,jB), the (kB)th vector of B int64_t jB = GBH (Bh, kB) ; int64_t pB_start = GBP (Bp, kB, bvlen) ; int64_t pB_end = GBP (Bp, kB+1, bvlen) ; int64_t bknz = pB_start - pB_end ; if (bknz == 0) continue ; GB_void bwork [GB_VLA(bsize)] ; if (!B_is_pattern && B_iso) { cast_B (bwork, Bx, bsize) ; } // get C(:,jC), the (kC)th vector of C // int64_t kC = kA * bnvec + kB ; int64_t pC = GBP (Cp, kC, cvlen) ; // get A(:,jA), the (kA)th vector of A int64_t jA = GBH (Ah, kA) ; int64_t pA_start = GBP (Ap, kA, avlen) ; int64_t pA_end = GBP (Ap, kA+1, avlen) ; GB_void awork [GB_VLA(asize)] ; if (!A_is_pattern && A_iso) { cast_A (awork, Ax, asize) ; } for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // awork = A(iA,jA), typecasted 
to op->xtype int64_t iA = GBI (Ai, pA, avlen) ; int64_t iAblock = iA * bvlen ; if (!A_is_pattern && !A_iso) { cast_A (awork, Ax + (pA*asize), asize) ; } for (int64_t pB = pB_start ; pB < pB_end ; pB++) { // bwork = B(iB,jB), typecasted to op->ytype int64_t iB = GBI (Bi, pB, bvlen) ; if (!B_is_pattern && !B_iso) { cast_B (bwork, Bx +(pB*bsize), bsize) ; } // C(iC,jC) = A(iA,jA) * B(iB,jB) if (!C_is_full) { int64_t iC = iAblock + iB ; Ci [pC] = iC ; } if (op_is_positional) { // positional binary operator switch (opcode) { case GB_FIRSTI_opcode : // z = first_i(A(iA,jA),y) == iA case GB_FIRSTI1_opcode : // z = first_i1(A(iA,jA),y) == iA+1 if (is64) { Cx_int64 [pC] = iA + offset ; } else { Cx_int32 [pC] = (int32_t) (iA + offset) ; } break ; case GB_FIRSTJ_opcode : // z = first_j(A(iA,jA),y) == jA case GB_FIRSTJ1_opcode : // z = first_j1(A(iA,jA),y) == jA+1 if (is64) { Cx_int64 [pC] = jA + offset ; } else { Cx_int32 [pC] = (int32_t) (jA + offset) ; } break ; case GB_SECONDI_opcode : // z = second_i(x,B(iB,jB)) == iB case GB_SECONDI1_opcode : // z = second_i1(x,B(iB,jB)) == iB+1 if (is64) { Cx_int64 [pC] = iB + offset ; } else { Cx_int32 [pC] = (int32_t) (iB + offset) ; } break ; case GB_SECONDJ_opcode : // z = second_j(x,B(iB,jB)) == jB case GB_SECONDJ1_opcode : // z = second_j1(x,B(iB,jB)) == jB+1 if (is64) { Cx_int64 [pC] = jB + offset ; } else { Cx_int32 [pC] = (int32_t) (jB + offset) ; } break ; default: ; } } else if (!C_iso) { // standard binary operator fmult (Cx +(pC*csize), awork, bwork) ; } pC++ ; } } } //-------------------------------------------------------------------------- // remove empty vectors from C, if hypersparse //-------------------------------------------------------------------------- GB_OK (GB_hypermatrix_prune (C, Context)) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C=kron(A,B)", GB0) ; GB_FREE_WORK ; return (GrB_SUCCESS) ; }
//------------------------------------------------------------------------------ // GB_kroner: Kronecker product, C = kron (A,B) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = kron(A,B) where op determines the binary multiplier to use. The type of // A and B are compatible with the x and y inputs of z=op(x,y), but can be // different. The type of C is the type of z. C is hypersparse if either A // or B are hypersparse. // FUTURE: this would be faster with built-in types and operators. // FUTURE: at most one thread is used for each vector of C=kron(A,B). The // matrix C is normally very large, but if both A and B are n-by-1, then C is // n^2-by-1 and only a single thread is used. A better method for this case // would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not // accounted for in the parallel load-balancing. #include "GB_kron.h" #include "GB_emult.h" #define GB_FREE_WORK \ { \ GB_phbix_free (A2) ; \ GB_phbix_free (B2) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORK ; \ GB_phbix_free (C) ; \ } GrB_Info GB_kroner // C = kron (A,B) ( GrB_Matrix C, // output matrix (static header) const bool C_is_csc, // desired format of C const GrB_BinaryOp op, // multiply operator const GrB_Matrix A_in, // input matrix bool A_is_pattern, // true if values of A are not used const GrB_Matrix B_in, // input matrix bool B_is_pattern, // true if values of B are not used GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; ASSERT (C != NULL && C->static_header) ; struct GB_Matrix_opaque A2_header, B2_header ; GrB_Matrix A2 = GB_clear_static_header (&A2_header) ; GrB_Matrix B2 = GB_clear_static_header (&B2_header) ; ASSERT_MATRIX_OK (A_in, "A_in for kron (A,B)", GB0) ; ASSERT_MATRIX_OK (B_in, "B_in for kron (A,B)", GB0) ; ASSERT_BINARYOP_OK (op, "op for kron (A,B)", GB0) ; //-------------------------------------------------------------------------- // finish any pending work //-------------------------------------------------------------------------- GB_MATRIX_WAIT (A_in) ; GB_MATRIX_WAIT (B_in) ; //-------------------------------------------------------------------------- // bitmap case: create sparse copies of A and B if they are bitmap //-------------------------------------------------------------------------- GrB_Matrix A = A_in ; if (GB_IS_BITMAP (A)) { GBURBLE ("A:") ; // set A2->iso = A->iso OK: no need for burble GB_OK (GB_dup_worker (&A2, A->iso, A, true, NULL, Context)) ; ASSERT_MATRIX_OK (A2, "dup A2 for kron (A,B)", GB0) ; GB_OK (GB_convert_bitmap_to_sparse (A2, Context)) ; ASSERT_MATRIX_OK (A2, "to sparse, A2 for kron (A,B)", GB0) ; A = A2 ; } GrB_Matrix B = B_in ; if (GB_IS_BITMAP (B)) { GBURBLE ("B:") ; // set B2->iso = B->iso OK: no need for burble GB_OK (GB_dup_worker (&B2, B->iso, B, true, NULL, Context)) ; ASSERT_MATRIX_OK (B2, "dup B2 for kron (A,B)", GB0) ; GB_OK (GB_convert_bitmap_to_sparse (B2, Context)) ; ASSERT_MATRIX_OK (B2, "to sparse, A2 for kron (A,B)", GB0) ; B = B2 ; } //-------------------------------------------------------------------------- // get inputs 
//-------------------------------------------------------------------------- const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const GB_void *restrict Ax = A_is_pattern ? NULL : ((GB_void *) A->x) ; const int64_t asize = A->type->size ; const int64_t avlen = A->vlen ; const int64_t avdim = A->vdim ; int64_t anvec = A->nvec ; int64_t anz = GB_nnz (A) ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int64_t *restrict Bi = B->i ; const GB_void *restrict Bx = B_is_pattern ? NULL : ((GB_void *) B->x) ; const int64_t bsize = B->type->size ; const int64_t bvlen = B->vlen ; const int64_t bvdim = B->vdim ; int64_t bnvec = B->nvec ; int64_t bnz = GB_nnz (B) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- double work = ((double) anz) * ((double) bnz) + (((double) anvec) * ((double) bnvec)) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (work, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // check if C is iso and compute its iso value if it is //-------------------------------------------------------------------------- GrB_Type ctype = op->ztype ; const size_t csize = ctype->size ; GB_void cscalar [GB_VLA(csize)] ; bool C_iso = GB_iso_emult (cscalar, ctype, A, B, op) ; //-------------------------------------------------------------------------- // allocate the output matrix C //-------------------------------------------------------------------------- // C has the same type as z for the multiply operator, z=op(x,y) GrB_Index cvlen, cvdim, cnzmax, cnvec ; bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ; ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ; ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ; ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ; ASSERT (ok) ; if (C_iso) { // the values of A and B are no longer needed if C is iso GBURBLE ("(iso kron) ") ; A_is_pattern = true ; B_is_pattern = true ; } // C is hypersparse if either A or B are hypersparse. It is never bitmap. bool C_is_hyper = (cvdim > 1) && (Ah != NULL || Bh != NULL) ; bool C_is_full = GB_as_if_full (A) && GB_as_if_full (B) ; int sparsity = C_is_full ? GxB_FULL : ((C_is_hyper) ? 
GxB_HYPERSPARSE : GxB_SPARSE) ; // set C->iso = C_iso OK GB_OK (GB_new_bix (&C, true, // full, sparse, or hyper; static header ctype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, sparsity, true, B->hyper_switch, cnvec, cnzmax, true, C_iso, Context)) ; //-------------------------------------------------------------------------- // get C and the operator //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; int64_t *restrict Ci = C->i ; GB_void *restrict Cx = (GB_void *) C->x ; int64_t *restrict Cx_int64 = NULL ; int32_t *restrict Cx_int32 = NULL ; GxB_binary_function fmult = op->function ; GB_Opcode opcode = op->opcode ; bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ; GB_cast_function cast_A = NULL, cast_B = NULL ; if (!A_is_pattern) { cast_A = GB_cast_factory (op->xtype->code, A->type->code) ; } if (!B_is_pattern) { cast_B = GB_cast_factory (op->ytype->code, B->type->code) ; } int64_t offset = 0 ; if (op_is_positional) { offset = GB_positional_offset (opcode) ; Cx_int64 = (int64_t *) Cx ; Cx_int32 = (int32_t *) Cx ; } bool is64 = (ctype == GrB_INT64) ; //-------------------------------------------------------------------------- // compute the column counts of C, and C->h if C is hypersparse //-------------------------------------------------------------------------- int64_t kC ; if (!C_is_full) { #pragma omp parallel for num_threads(nthreads) schedule(guided) for (kC = 0 ; kC < cnvec ; kC++) { int64_t kA = kC / bnvec ; int64_t kB = kC % bnvec ; // get A(:,jA), the (kA)th vector of A int64_t jA = GBH (Ah, kA) ; int64_t aknz = (Ap == NULL) ? avlen : (Ap [kA+1] - Ap [kA]) ; // get B(:,jB), the (kB)th vector of B int64_t jB = GBH (Bh, kB) ; int64_t bknz = (Bp == NULL) ? 
bvlen : (Bp [kB+1] - Bp [kB]) ; // determine # entries in C(:,jC), the (kC)th vector of C // int64_t kC = kA * bnvec + kB ; if (!C_is_full) { Cp [kC] = aknz * bknz ; } if (C_is_hyper) { Ch [kC] = jA * bvdim + jB ; } } GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads, Context) ; if (C_is_hyper) C->nvec = cnvec ; } C->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // C = kron (A,B) where C is iso and full //-------------------------------------------------------------------------- if (C_iso) { // Cx [0] = cscalar = op (A,B) memcpy (C->x, cscalar, csize) ; if (C_is_full) { // no more work to do if C is iso and full ASSERT_MATRIX_OK (C, "C=kron(A,B), iso full", GB0) ; GB_FREE_WORK ; return (GrB_SUCCESS) ; } } //-------------------------------------------------------------------------- // C = kron (A,B) //-------------------------------------------------------------------------- const bool A_iso = A->iso ; const bool B_iso = B->iso ; #pragma omp parallel for num_threads(nthreads) schedule(guided) for (kC = 0 ; kC < cnvec ; kC++) { int64_t kA = kC / bnvec ; int64_t kB = kC % bnvec ; // get B(:,jB), the (kB)th vector of B int64_t jB = GBH (Bh, kB) ; int64_t pB_start = GBP (Bp, kB, bvlen) ; int64_t pB_end = GBP (Bp, kB+1, bvlen) ; int64_t bknz = pB_start - pB_end ; if (bknz == 0) continue ; GB_void bwork [GB_VLA(bsize)] ; if (!B_is_pattern && B_iso) { cast_B (bwork, Bx, bsize) ; } // get C(:,jC), the (kC)th vector of C // int64_t kC = kA * bnvec + kB ; int64_t pC = GBP (Cp, kC, cvlen) ; // get A(:,jA), the (kA)th vector of A int64_t jA = GBH (Ah, kA) ; int64_t pA_start = GBP (Ap, kA, avlen) ; int64_t pA_end = GBP (Ap, kA+1, avlen) ; GB_void awork [GB_VLA(asize)] ; if (!A_is_pattern && A_iso) { cast_A (awork, Ax, asize) ; } for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // awork = A(iA,jA), typecasted to op->xtype int64_t iA = GBI (Ai, pA, avlen) ; int64_t iAblock = iA * bvlen ; if (!A_is_pattern && !A_iso) { cast_A (awork, Ax + (pA*asize), asize) ; } for (int64_t pB = pB_start ; pB < pB_end ; pB++) { // bwork = B(iB,jB), typecasted to op->ytype int64_t iB = GBI (Bi, pB, bvlen) ; if (!B_is_pattern && !B_iso) { cast_B (bwork, Bx +(pB*bsize), bsize) ; } // C(iC,jC) = A(iA,jA) * B(iB,jB) if (!C_is_full) { int64_t iC = iAblock + iB ; Ci [pC] = iC ; } if (op_is_positional) { // positional binary operator switch (opcode) { case GB_FIRSTI_opcode : // z = first_i(A(iA,jA),y) == iA case GB_FIRSTI1_opcode : // z = first_i1(A(iA,jA),y) == iA+1 if (is64) { Cx_int64 [pC] = iA + offset ; } else { Cx_int32 [pC] = (int32_t) (iA + offset) ; } break ; case GB_FIRSTJ_opcode : // z = first_j(A(iA,jA),y) == jA case GB_FIRSTJ1_opcode : // z = first_j1(A(iA,jA),y) == jA+1 if (is64) { Cx_int64 [pC] = jA + offset ; } else { Cx_int32 [pC] = (int32_t) (jA + offset) ; } break ; case GB_SECONDI_opcode : // z = second_i(x,B(iB,jB)) == iB case GB_SECONDI1_opcode : // z = second_i1(x,B(iB,jB)) == iB+1 if (is64) { Cx_int64 [pC] = iB + offset ; } else { Cx_int32 [pC] = (int32_t) (iB + offset) ; } break ; case GB_SECONDJ_opcode : // z = second_j(x,B(iB,jB)) == jB case GB_SECONDJ1_opcode : // z = second_j1(x,B(iB,jB)) == jB+1 if (is64) { Cx_int64 [pC] = jB + offset ; } else { Cx_int32 [pC] = (int32_t) (jB + offset) ; } break ; default: ; } } else if (!C_iso) { // standard binary operator fmult (Cx +(pC*csize), awork, bwork) ; } pC++ ; } } } //-------------------------------------------------------------------------- // remove empty vectors from C, if hypersparse 
//-------------------------------------------------------------------------- GB_OK (GB_hypermatrix_prune (C, Context)) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C=kron(A,B)", GB0) ; GB_FREE_WORK ; return (GrB_SUCCESS) ; }
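For reference, the index arithmetic in GB_kroner encodes the standard Kronecker layout: with B of size bvlen-by-bvdim, the pairing of A(iA,jA) with B(iB,jB) lands in row iA*bvlen + iB and column jA*bvdim + jB of C, which is exactly what "iC = iAblock + iB" and "Ch [kC] = jA * bvdim + jB" compute above. Below is a minimal dense sketch of that mapping, not part of SuiteSparse; kron_dense and its argument names are illustrative.

#include <stddef.h>

/* illustrative sketch, not part of SuiteSparse:GraphBLAS */
/* C = kron(A,B): A is am-by-an, B is bm-by-bn, C is (am*bm)-by-(an*bn),
   all dense and stored row-major */
static void kron_dense (const double *A, int am, int an,
                        const double *B, int bm, int bn, double *C)
{
    int cn = an * bn ;      /* number of columns of C */
    #pragma omp parallel for collapse(2) schedule(static)
    for (int iA = 0 ; iA < am ; iA++)
    {
        for (int jA = 0 ; jA < an ; jA++)
        {
            double a = A [iA*an + jA] ;
            for (int iB = 0 ; iB < bm ; iB++)
            {
                for (int jB = 0 ; jB < bn ; jB++)
                {
                    /* row iA*bm + iB, column jA*bn + jB of C */
                    C [(size_t) (iA*bm + iB) * cn + (size_t) (jA*bn + jB)]
                        = a * B [iB*bn + jB] ;
                }
            }
        }
    }
}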
matrix_openmp.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void fillMatrix(float *mat, int min, int max, size_t size) {
    for (size_t i = 0; i < size; i++) {
        mat[i] = (rand() % (max - min + 1)) + min;
    }
}

void multiply() {
    // create the matrices, 2 that contain the numbers we want to multiply
    int row_matrix_1 = 50000;
    int row_matrix_2 = 50000;
    int col_matrix_1 = 50000;
    int col_matrix_2 = 50000;
    // creating 3rd matrix to hold the final value
    int row_matrix_3 = 50000;
    int col_matrix_3 = 50000;
    // each 50000 x 50000 float matrix needs about 10 GB; compute the element
    // counts in size_t (50000 * 50000 overflows a 32-bit int) and check the
    // allocations before using them
    float *matrix_one = (float*) malloc(sizeof(float) * (size_t)row_matrix_1 * col_matrix_1);
    float *matrix_two = (float*) malloc(sizeof(float) * (size_t)row_matrix_2 * col_matrix_2);
    float *matrix_three = (float*) malloc(sizeof(float) * (size_t)row_matrix_3 * col_matrix_3);
    if (matrix_one == NULL || matrix_two == NULL || matrix_three == NULL) {
        fprintf(stderr, "Matrix allocation failed\n");
        free(matrix_one);
        free(matrix_two);
        free(matrix_three);
        return;
    }
    // seed the generator once, then fill the matrices with random numbers
    srand((unsigned) time(NULL));
    fillMatrix(matrix_one, 1, 100, (size_t)row_matrix_1 * col_matrix_1);
    fillMatrix(matrix_two, 1, 100, (size_t)row_matrix_2 * col_matrix_2);
    // multiply the matrices
    printf("Multiplying the matrices...\n");
    // repeat the multiplication 10 times; the row loop is parallel, and sum
    // is local to each (i,j) pair so there is no data race on it
    for (int rep = 0; rep < 10; rep++) {
        #pragma omp parallel for
        for (int i = 0; i < row_matrix_1; i++) {
            for (int j = 0; j < col_matrix_2; j++) {
                float sum = 0;
                for (int k = 0; k < col_matrix_1; k++) {
                    // fixed: the second factor must use column j, not k
                    sum += matrix_one[(size_t)i*col_matrix_1 + k] * matrix_two[(size_t)k*col_matrix_2 + j];
                }
                matrix_three[(size_t)i*col_matrix_3 + j] = sum;
            }
        }
    }
    printf("Multiplication done\n");
    // free the memory allocation
    free(matrix_one);
    free(matrix_two);
    free(matrix_three);
    // This code is contributed by Manish Kumar (mkumar2789)
}

int main(void) {
    // calls the multiply function
    // start clock (note: clock() accumulates CPU time across all threads)
    clock_t start = clock();
    multiply();
    // stop clock
    clock_t end = clock();
    double time_spent = (double)(end - start) / CLOCKS_PER_SEC;
    printf("Time: %f seconds\n", time_spent);
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void fillMatrix(float *mat, int min, int max, size_t size) {
    for (size_t i = 0; i < size; i++) {
        mat[i] = (rand() % (max - min + 1)) + min;
    }
}

void multiply() {
    // create the matrices, 2 that contain the numbers we want to multiply
    int row_matrix_1 = 50000;
    int row_matrix_2 = 50000;
    int col_matrix_1 = 50000;
    int col_matrix_2 = 50000;
    // creating 3rd matrix to hold the final value
    int row_matrix_3 = 50000;
    int col_matrix_3 = 50000;
    // each 50000 x 50000 float matrix needs about 10 GB; compute the element
    // counts in size_t (50000 * 50000 overflows a 32-bit int) and check the
    // allocations before using them
    float *matrix_one = (float *)malloc(sizeof(float) * (size_t)row_matrix_1 * col_matrix_1);
    float *matrix_two = (float *)malloc(sizeof(float) * (size_t)row_matrix_2 * col_matrix_2);
    float *matrix_three = (float *)malloc(sizeof(float) * (size_t)row_matrix_3 * col_matrix_3);
    if (matrix_one == NULL || matrix_two == NULL || matrix_three == NULL) {
        fprintf(stderr, "Matrix allocation failed\n");
        free(matrix_one);
        free(matrix_two);
        free(matrix_three);
        return;
    }
    // seed the generator once, then fill the matrices with random numbers
    srand((unsigned) time(NULL));
    fillMatrix(matrix_one, 1, 100, (size_t)row_matrix_1 * col_matrix_1);
    fillMatrix(matrix_two, 1, 100, (size_t)row_matrix_2 * col_matrix_2);
    // multiply the matrices
    printf("Multiplying the matrices...\n");
    // repeat the multiplication 10 times; sum is local to each (i, j) pair
    for (int rep = 0; rep < 10; rep++) {
        for (int i = 0; i < row_matrix_1; i++) {
            for (int j = 0; j < col_matrix_2; j++) {
                float sum = 0;
                for (int k = 0; k < col_matrix_1; k++) {
                    // fixed: the second factor must use column j, not k
                    sum += matrix_one[(size_t)i * col_matrix_1 + k] * matrix_two[(size_t)k * col_matrix_2 + j];
                }
                matrix_three[(size_t)i * col_matrix_3 + j] = sum;
            }
        }
    }
    printf("Multiplication done\n");
    // free the memory allocation
    free(matrix_one);
    free(matrix_two);
    free(matrix_three);
    // This code is contributed by Manish Kumar (mkumar2789)
}

int main(void) {
    // calls the multiply function
    // start clock (note: clock() accumulates CPU time across all threads)
    clock_t start = clock();
    multiply();
    // stop clock
    clock_t end = clock();
    double time_spent = (double)(end - start) / CLOCKS_PER_SEC;
    printf("Time: %f seconds\n", time_spent);
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

void fillMatrix(float *mat, int min, int max, size_t size) {
    for (size_t i = 0; i < size; i++) {
        mat[i] = (rand() % (max - min + 1)) + min;
    }
}

void multiply() {
    // create the matrices, 2 that contain the numbers we want to multiply
    int row_matrix_1 = 50000;
    int row_matrix_2 = 50000;
    int col_matrix_1 = 50000;
    int col_matrix_2 = 50000;
    // creating 3rd matrix to hold the final value
    int row_matrix_3 = 50000;
    int col_matrix_3 = 50000;
    // each 50000 x 50000 float matrix needs about 10 GB; compute the element
    // counts in size_t (50000 * 50000 overflows a 32-bit int) and check the
    // allocations before using them
    float *matrix_one = (float *)malloc(sizeof(float) * (size_t)row_matrix_1 * col_matrix_1);
    float *matrix_two = (float *)malloc(sizeof(float) * (size_t)row_matrix_2 * col_matrix_2);
    float *matrix_three = (float *)malloc(sizeof(float) * (size_t)row_matrix_3 * col_matrix_3);
    if (matrix_one == NULL || matrix_two == NULL || matrix_three == NULL) {
        fprintf(stderr, "Matrix allocation failed\n");
        free(matrix_one);
        free(matrix_two);
        free(matrix_three);
        return;
    }
    // seed the generator once, then fill the matrices with random numbers
    srand((unsigned) time(NULL));
    fillMatrix(matrix_one, 1, 100, (size_t)row_matrix_1 * col_matrix_1);
    fillMatrix(matrix_two, 1, 100, (size_t)row_matrix_2 * col_matrix_2);
    // multiply the matrices
    printf("Multiplying the matrices...\n");
    // repeat the multiplication 10 times; the row loop is parallel, and sum
    // is local to each (i, j) pair so there is no data race on it
    for (int rep = 0; rep < 10; rep++) {
        #pragma omp parallel for
        for (int i = 0; i < row_matrix_1; i++) {
            for (int j = 0; j < col_matrix_2; j++) {
                float sum = 0;
                for (int k = 0; k < col_matrix_1; k++) {
                    // fixed: the second factor must use column j, not k
                    sum += matrix_one[(size_t)i * col_matrix_1 + k] * matrix_two[(size_t)k * col_matrix_2 + j];
                }
                matrix_three[(size_t)i * col_matrix_3 + j] = sum;
            }
        }
    }
    printf("Multiplication done\n");
    // free the memory allocation
    free(matrix_one);
    free(matrix_two);
    free(matrix_three);
    // This code is contributed by Manish Kumar (mkumar2789)
}

int main(void) {
    // calls the multiply function
    // start clock (note: clock() accumulates CPU time across all threads)
    clock_t start = clock();
    multiply();
    // stop clock
    clock_t end = clock();
    double time_spent = (double)(end - start) / CLOCKS_PER_SEC;
    printf("Time: %f seconds\n", time_spent);
    return 0;
}
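One caveat worth noting for the timing in this example: clock() reports CPU time summed over all threads, so a well-parallelized multiply() can appear to take more "seconds" than it does on the wall clock. A small hedged sketch of the alternative, using OpenMP's wall-clock timer (the main body here is a stand-in, not the benchmark's actual main):

#include <stdio.h>
#include <omp.h>

int main(void) {
    double t0 = omp_get_wtime();   // wall-clock start
    // ... run the parallel work here, e.g. multiply() ...
    double t1 = omp_get_wtime();   // wall-clock end
    printf("Wall time: %f seconds\n", t1 - t0);
    return 0;
}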
GB_unop__exp_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__exp_fc32_fc32) // op(A') function: GB (_unop_tran__exp_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = cexpf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cexpf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = cexpf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__exp_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = cexpf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = cexpf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__exp_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__exp_fc32_fc32) // op(A') function: GB (_unop_tran__exp_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = cexpf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cexpf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = cexpf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__exp_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = cexpf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = cexpf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__exp_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__exp_fc32_fc32) // op(A') function: GB (_unop_tran__exp_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = cexpf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = cexpf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = cexpf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXP || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__exp_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = cexpf (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = cexpf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__exp_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
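The generated kernel above is an instance of a simple pattern: an embarrassingly parallel elementwise apply, with a bitmap variant that skips entries whose bit is clear. A standalone sketch of the same pattern for float complex follows; apply_cexpf is an illustrative name, not a generated GraphBLAS kernel.

#include <complex.h>
#include <stdint.h>

/* illustrative sketch, not part of SuiteSparse:GraphBLAS */
static void apply_cexpf (float complex *Cx, const float complex *Ax,
                         const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    if (Ab == NULL)
    {
        // all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = cexpf (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case: apply the operator only where Ab [p] is set
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = cexpf (Ax [p]) ;
        }
    }
}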
omp-hello-world.c
/***************************************************************************** Example : omp-hello-world.c Objective : OpenMP program to print "Hello World" This example demonstrates the use of omp_get_thread_num() omp_get_num_threads() calls Input : Set the number of threads to use by means of the OMP_NUM_THREADS environment variable. For C shell use command : setenv OMP_NUM_THREADS 4 For bash shell use command : export OMP_NUM_THREADS=4. Output : Each thread prints a message "Hello World" and its identifier. Created : Aug 2011 Author : RarchK *********************************************************************************/ #include<stdio.h> #include<stdlib.h> #include<omp.h> /* Main Program */ int main(int argc , char **argv) { int Threadid, Noofthreads; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : OpenMP program to print \"Hello World\" using OpenMP PARALLEL directives\n "); printf("\n\t\t..........................................................................\n"); /* Set the number of threads */ /* omp_set_num_threads(4); */ /* OpenMP Parallel Construct : Fork a team of threads */ #pragma omp parallel private(Threadid) { /* Obtain the thread id */ Threadid = omp_get_thread_num(); printf("\n\t\t Hello World is being printed by the thread : %d\n", Threadid); /* Master Thread Has Its Threadid 0 */ if (Threadid == 0) { Noofthreads = omp_get_num_threads(); printf("\n\t\t Master thread printing total number of threads for this execution are : %d\n", Noofthreads); } }/* All thread join Master thread */ return 0; }
/***************************************************************************** Example : omp-hello-world.c Objective : OpenMP program to print "Hello World" This example demonstrates the use of omp_get_thread_num() omp_get_num_threads() calls Input : Set the number of threads to use by means of the OMP_NUM_THREADS environment variable. For C shell use command : setenv OMP_NUM_THREADS 4 For bash shell use command : export OMP_NUM_THREADS=4. Output : Each thread prints a message "Hello World" and its identifier. Created : Aug 2011 Author : RarchK *********************************************************************************/ #include<stdio.h> #include<stdlib.h> #include<omp.h> /* Main Program */ int main(int argc, char **argv) { int Threadid, Noofthreads; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : OpenMP program to print \"Hello World\" using OpenMP PARALLEL directives\n "); printf("\n\t\t..........................................................................\n"); /* Set the number of threads */ /* omp_set_num_threads(4); */ /* OpenMP Parallel Construct : Fork a team of threads */ /* Obtain the thread id */ Threadid = omp_get_thread_num(); printf("\n\t\t Hello World is being printed by the thread : %d\n", Threadid); /* Master Thread Has Its Threadid 0 */ if (Threadid == 0) { Noofthreads = omp_get_num_threads(); printf("\n\t\t Master thread printing total number of threads for this execution are : %d\n", Noofthreads); } /* All thread join Master thread */ return 0; }
/***************************************************************************** Example : omp-hello-world.c Objective : OpenMP program to print "Hello World" This example demonstrates the use of omp_get_thread_num() omp_get_num_threads() calls Input : Set the number of threads to use by means of the OMP_NUM_THREADS environment variable. For C shell use command : setenv OMP_NUM_THREADS 4 For bash shell use command : export OMP_NUM_THREADS=4. Output : Each thread prints a message "Hello World" and its identifier. Created : Aug 2011 Author : RarchK *********************************************************************************/ #include<stdio.h> #include<stdlib.h> #include<omp.h> /* Main Program */ int main(int argc, char **argv) { int Threadid, Noofthreads; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : OpenMP program to print \"Hello World\" using OpenMP PARALLEL directives\n "); printf("\n\t\t..........................................................................\n"); /* Set the number of threads */ /* omp_set_num_threads(4); */ /* OpenMP Parallel Construct : Fork a team of threads */ #pragma omp parallel private(Threadid) { /* Obtain the thread id */ Threadid = omp_get_thread_num(); printf("\n\t\t Hello World is being printed by the thread : %d\n", Threadid); /* Master Thread Has Its Threadid 0 */ if (Threadid == 0) { Noofthreads = omp_get_num_threads(); printf("\n\t\t Master thread printing total number of threads for this execution are : %d\n", Noofthreads); } } /* All thread join Master thread */ return 0; }
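A minimal build-and-run transcript for the example above (assuming gcc with OpenMP support; the thread count is illustrative):

    gcc -fopenmp omp-hello-world.c -o omp-hello-world
    export OMP_NUM_THREADS=4
    ./omp-hello-world

Each of the four threads then prints its own "Hello World" line, and the thread with id 0 additionally reports the team size returned by omp_get_num_threads().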
ParallelFor.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <cstdint> #include <type_traits> #include "open3d/core/Device.h" #include "open3d/utility/Logging.h" #include "open3d/utility/Overload.h" #include "open3d/utility/Parallel.h" #include "open3d/utility/Preprocessor.h" #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include "open3d/core/CUDAUtils.h" #endif namespace open3d { namespace core { #ifdef __CUDACC__ static constexpr int64_t OPEN3D_PARFOR_BLOCK = 128; static constexpr int64_t OPEN3D_PARFOR_THREAD = 4; /// Calls f(n) with the "grid-stride loops" pattern. template <int64_t block_size, int64_t thread_size, typename func_t> __global__ void ElementWiseKernel_(int64_t n, func_t f) { int64_t items_per_block = block_size * thread_size; int64_t idx = blockIdx.x * items_per_block + threadIdx.x; #pragma unroll for (int64_t i = 0; i < thread_size; ++i) { if (idx < n) { f(idx); idx += block_size; } } } /// Run a function in parallel on CUDA. template <typename func_t> void ParallelForCUDA_(const Device& device, int64_t n, const func_t& func) { if (device.GetType() != Device::DeviceType::CUDA) { utility::LogError("ParallelFor for CUDA cannot run on device {}.", device.ToString()); } if (n == 0) { return; } CUDAScopedDevice scoped_device(device); int64_t items_per_block = OPEN3D_PARFOR_BLOCK * OPEN3D_PARFOR_THREAD; int64_t grid_size = (n + items_per_block - 1) / items_per_block; ElementWiseKernel_<OPEN3D_PARFOR_BLOCK, OPEN3D_PARFOR_THREAD> <<<grid_size, OPEN3D_PARFOR_BLOCK, 0, core::cuda::GetStream()>>>( n, func); OPEN3D_GET_LAST_CUDA_ERROR("ParallelFor failed."); } #else /// Run a function in parallel on CPU. template <typename func_t> void ParallelForCPU_(const Device& device, int64_t n, const func_t& func) { if (device.GetType() != Device::DeviceType::CPU) { utility::LogError("ParallelFor for CPU cannot run on device {}.", device.ToString()); } if (n == 0) { return; } #pragma omp parallel for num_threads(utility::EstimateMaxThreads()) for (int64_t i = 0; i < n; ++i) { func(i); } } #endif /// Run a function in parallel on CPU or CUDA. /// /// \param device The device for the parallel for loop to run on. 
/// \param n The number of workloads. /// \param func The function to be executed in parallel. The function should /// take an int64_t workload index and returns void, i.e., `void func(int64_t)`. /// /// \note This is optimized for uniform work items, i.e. where each call to \p /// func takes the same time. /// \note If you use a lambda function, capture only the required variables /// instead of all to prevent accidental race conditions. If you want the /// kernel to be used on both CPU and CUDA, capture the variables by value. template <typename func_t> void ParallelFor(const Device& device, int64_t n, const func_t& func) { #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else ParallelForCPU_(device, n, func); #endif } /// Run a potentially vectorized function in parallel on CPU or CUDA. /// /// \param device The device for the parallel for loop to run on. /// \param n The number of workloads. /// \param func The function to be executed in parallel. The function should /// take an int64_t workload index and returns void, i.e., `void func(int64_t)`. /// \param vec_func The vectorized function to be executed in parallel. The /// function should be provided using the OPEN3D_VECTORIZED macro, e.g., /// `OPEN3D_VECTORIZED(MyISPCKernel, some_used_variable)`. /// /// \note This is optimized for uniform work items, i.e. where each call to \p /// func takes the same time. /// \note If you use a lambda function, capture only the required variables /// instead of all to prevent accidental race conditions. If you want the /// kernel to be used on both CPU and CUDA, capture the variables by value. /// /// Example: /// /// \code /// /* MyFile.cpp */ /// #ifdef BUILD_ISPC_MODULE /// #include "MyFile_ispc.h" /// #endif /// /// std::vector<float> v(1000); /// float fill_value = 42.0f; /// core::ParallelFor( /// core::Device("CPU:0"), /// v.size(), /// [&](int64_t idx) { v[idx] = fill_value; }, /// OPEN3D_VECTORIZED(MyFillKernel, v.data(), fill_value)); /// /// /* MyFile.ispc */ /// #include "open3d/core/ParallelFor.isph" /// /// static inline void MyFillFunction(int64_t idx, /// float* uniform v, /// uniform float fill_value) { /// v[idx] = fill_value; /// } /// /// OPEN3D_EXPORT_VECTORIZED(MyFillKernel, /// MyFillFunction, /// float* uniform, /// uniform float) /// \endcode template <typename vec_func_t, typename func_t> void ParallelFor(const Device& device, int64_t n, const func_t& func, const vec_func_t& vec_func) { #ifdef BUILD_ISPC_MODULE #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else int num_threads = utility::EstimateMaxThreads(); ParallelForCPU_(device, num_threads, [&](int64_t i) { int64_t start = n * i / num_threads; int64_t end = std::min<int64_t>(n * (i + 1) / num_threads, n); vec_func(start, end); }); #endif #else #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else ParallelForCPU_(device, n, func); #endif #endif } #ifdef BUILD_ISPC_MODULE // Internal helper macro. #define OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, ...) \ using namespace ispc; \ ISPCKernel(start, end, __VA_ARGS__); #else // Internal helper macro. #define OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, ...) \ utility::LogError( \ "ISPC module disabled. Unable to call vectorized kernel {}", \ OPEN3D_STRINGIFY(ISPCKernel)); #endif /// Internal helper macro. #define OPEN3D_OVERLOADED_LAMBDA_(T, ISPCKernel, ...) \ [&](T, int64_t start, int64_t end) { \ OPEN3D_CALL_ISPC_KERNEL_( \ OPEN3D_CONCAT(ISPCKernel, OPEN3D_CONCAT(_, T)), start, end, \ __VA_ARGS__); \ } /// OPEN3D_VECTORIZED(ISPCKernel, ...) 
/// /// Defines a lambda function to call the provided kernel. /// /// Use the OPEN3D_EXPORT_TEMPLATE_VECTORIZED macro to define the /// kernel in the ISPC source file. /// /// Note: The arguments to the kernel only have to exist if ISPC support is /// enabled via BUILD_ISPC_MODULE=ON. #define OPEN3D_VECTORIZED(ISPCKernel, ...) \ [&](int64_t start, int64_t end) { \ OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, __VA_ARGS__); \ } /// OPEN3D_TEMPLATE_VECTORIZED(T, ISPCKernel, ...) /// /// Defines a lambda function to call the provided template-like kernel. /// Supported types: /// - bool /// - unsigned + signed {8,16,32,64} bit integers, /// - float, double /// /// Use the OPEN3D_EXPORT_TEMPLATE_VECTORIZED macro to define the /// kernel in the ISPC source file. /// /// Note: The arguments to the kernel only have to exist if ISPC support is /// enabled via BUILD_ISPC_MODULE=ON. #define OPEN3D_TEMPLATE_VECTORIZED(T, ISPCKernel, ...) \ [&](int64_t start, int64_t end) { \ static_assert(std::is_arithmetic<T>::value, \ "Data type is not an arithmetic type"); \ utility::Overload( \ OPEN3D_OVERLOADED_LAMBDA_(bool, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint8_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int8_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint16_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int16_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint32_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int32_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint64_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int64_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(float, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(double, ISPCKernel, __VA_ARGS__), \ [&](auto&& generic, int64_t start, int64_t end) { \ utility::LogError( \ "Unsupported data type {} for calling " \ "vectorized kernel {}", \ typeid(generic).name(), \ OPEN3D_STRINGIFY(ISPCKernel)); \ })(T{}, start, end); \ } } // namespace core } // namespace open3d
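The vectorized ParallelFor overload above dispatches contiguous [start, end) index ranges, one per worker, rather than single indices. A hedged C sketch of that chunking scheme follows; parallel_ranges is an illustrative name, whereas the real code passes a lambda to ParallelForCPU_.

#include <omp.h>

/* illustrative sketch of the range-per-worker dispatch, not Open3D code */
static void parallel_ranges(long long n, void (*vec_func)(long long, long long))
{
    int num_threads = omp_get_max_threads();
    long long i;
    #pragma omp parallel for num_threads(num_threads)
    for (i = 0; i < num_threads; i++) {
        long long start = n * i / num_threads;        // inclusive
        long long end = n * (i + 1) / num_threads;    // exclusive
        if (end > n) end = n;                         // clamp, as std::min does
        vec_func(start, end);
    }
}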
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <cstdint> #include <type_traits> #include "open3d/core/Device.h" #include "open3d/utility/Logging.h" #include "open3d/utility/Overload.h" #include "open3d/utility/Parallel.h" #include "open3d/utility/Preprocessor.h" #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include "open3d/core/CUDAUtils.h" #endif namespace open3d { namespace core { #ifdef __CUDACC__ static constexpr int64_t OPEN3D_PARFOR_BLOCK = 128; static constexpr int64_t OPEN3D_PARFOR_THREAD = 4; /// Calls f(n) with the "grid-stride loops" pattern. template <int64_t block_size, int64_t thread_size, typename func_t> __global__ void ElementWiseKernel_(int64_t n, func_t f) { int64_t items_per_block = block_size * thread_size; int64_t idx = blockIdx.x * items_per_block + threadIdx.x; #pragma unroll for (int64_t i = 0; i < thread_size; ++i) { if (idx < n) { f(idx); idx += block_size; } } } /// Run a function in parallel on CUDA. template <typename func_t> void ParallelForCUDA_(const Device& device, int64_t n, const func_t& func) { if (device.GetType() != Device::DeviceType::CUDA) { utility::LogError("ParallelFor for CUDA cannot run on device {}.", device.ToString()); } if (n == 0) { return; } CUDAScopedDevice scoped_device(device); int64_t items_per_block = OPEN3D_PARFOR_BLOCK * OPEN3D_PARFOR_THREAD; int64_t grid_size = (n + items_per_block - 1) / items_per_block; ElementWiseKernel_<OPEN3D_PARFOR_BLOCK, OPEN3D_PARFOR_THREAD> <<<grid_size, OPEN3D_PARFOR_BLOCK, 0, core::cuda::GetStream()>>>( n, func); OPEN3D_GET_LAST_CUDA_ERROR("ParallelFor failed."); } #else /// Run a function in parallel on CPU. template <typename func_t> void ParallelForCPU_(const Device& device, int64_t n, const func_t& func) { if (device.GetType() != Device::DeviceType::CPU) { utility::LogError("ParallelFor for CPU cannot run on device {}.", device.ToString()); } if (n == 0) { return; } for (int64_t i = 0; i < n; ++i) { func(i); } } #endif /// Run a function in parallel on CPU or CUDA. /// /// \param device The device for the parallel for loop to run on. /// \param n The number of workloads. 
/// \param func The function to be executed in parallel. The function should /// take an int64_t workload index and returns void, i.e., `void func(int64_t)`. /// /// \note This is optimized for uniform work items, i.e. where each call to \p /// func takes the same time. /// \note If you use a lambda function, capture only the required variables /// instead of all to prevent accidental race conditions. If you want the /// kernel to be used on both CPU and CUDA, capture the variables by value. template <typename func_t> void ParallelFor(const Device& device, int64_t n, const func_t& func) { #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else ParallelForCPU_(device, n, func); #endif } /// Run a potentially vectorized function in parallel on CPU or CUDA. /// /// \param device The device for the parallel for loop to run on. /// \param n The number of workloads. /// \param func The function to be executed in parallel. The function should /// take an int64_t workload index and returns void, i.e., `void func(int64_t)`. /// \param vec_func The vectorized function to be executed in parallel. The /// function should be provided using the OPEN3D_VECTORIZED macro, e.g., /// `OPEN3D_VECTORIZED(MyISPCKernel, some_used_variable)`. /// /// \note This is optimized for uniform work items, i.e. where each call to \p /// func takes the same time. /// \note If you use a lambda function, capture only the required variables /// instead of all to prevent accidental race conditions. If you want the /// kernel to be used on both CPU and CUDA, capture the variables by value. /// /// Example: /// /// \code /// /* MyFile.cpp */ /// #ifdef BUILD_ISPC_MODULE /// #include "MyFile_ispc.h" /// #endif /// /// std::vector<float> v(1000); /// float fill_value = 42.0f; /// core::ParallelFor( /// core::Device("CPU:0"), /// v.size(), /// [&](int64_t idx) { v[idx] = fill_value; }, /// OPEN3D_VECTORIZED(MyFillKernel, v.data(), fill_value)); /// /// /* MyFile.ispc */ /// #include "open3d/core/ParallelFor.isph" /// /// static inline void MyFillFunction(int64_t idx, /// float* uniform v, /// uniform float fill_value) { /// v[idx] = fill_value; /// } /// /// OPEN3D_EXPORT_VECTORIZED(MyFillKernel, /// MyFillFunction, /// float* uniform, /// uniform float) /// \endcode template <typename vec_func_t, typename func_t> void ParallelFor(const Device& device, int64_t n, const func_t& func, const vec_func_t& vec_func) { #ifdef BUILD_ISPC_MODULE #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else int num_threads = utility::EstimateMaxThreads(); ParallelForCPU_(device, num_threads, [&](int64_t i) { int64_t start = n * i / num_threads; int64_t end = std::min<int64_t>(n * (i + 1) / num_threads, n); vec_func(start, end); }); #endif #else #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else ParallelForCPU_(device, n, func); #endif #endif } #ifdef BUILD_ISPC_MODULE // Internal helper macro. #define OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, ...) \ using namespace ispc; \ ISPCKernel(start, end, __VA_ARGS__); #else // Internal helper macro. #define OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, ...) \ utility::LogError( \ "ISPC module disabled. Unable to call vectorized kernel {}", \ OPEN3D_STRINGIFY(ISPCKernel)); #endif /// Internal helper macro. #define OPEN3D_OVERLOADED_LAMBDA_(T, ISPCKernel, ...) \ [&](T, int64_t start, int64_t end) { \ OPEN3D_CALL_ISPC_KERNEL_( \ OPEN3D_CONCAT(ISPCKernel, OPEN3D_CONCAT(_, T)), start, end, \ __VA_ARGS__); \ } /// OPEN3D_VECTORIZED(ISPCKernel, ...) 
/// /// Defines a lambda function to call the provided kernel. /// /// Use the OPEN3D_EXPORT_TEMPLATE_VECTORIZED macro to define the /// kernel in the ISPC source file. /// /// Note: The arguments to the kernel only have to exist if ISPC support is /// enabled via BUILD_ISPC_MODULE=ON. #define OPEN3D_VECTORIZED(ISPCKernel, ...) \ [&](int64_t start, int64_t end) { \ OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, __VA_ARGS__); \ } /// OPEN3D_TEMPLATE_VECTORIZED(T, ISPCKernel, ...) /// /// Defines a lambda function to call the provided template-like kernel. /// Supported types: /// - bool /// - unsigned + signed {8,16,32,64} bit integers, /// - float, double /// /// Use the OPEN3D_EXPORT_TEMPLATE_VECTORIZED macro to define the /// kernel in the ISPC source file. /// /// Note: The arguments to the kernel only have to exist if ISPC support is /// enabled via BUILD_ISPC_MODULE=ON. #define OPEN3D_TEMPLATE_VECTORIZED(T, ISPCKernel, ...) \ [&](int64_t start, int64_t end) { \ static_assert(std::is_arithmetic<T>::value, \ "Data type is not an arithmetic type"); \ utility::Overload( \ OPEN3D_OVERLOADED_LAMBDA_(bool, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint8_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int8_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint16_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int16_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint32_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int32_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint64_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int64_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(float, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(double, ISPCKernel, __VA_ARGS__), \ [&](auto&& generic, int64_t start, int64_t end) { \ utility::LogError( \ "Unsupported data type {} for calling " \ "vectorized kernel {}", \ typeid(generic).name(), \ OPEN3D_STRINGIFY(ISPCKernel)); \ })(T{}, start, end); \ } } // namespace core } // namespace open3d
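// A minimal usage sketch of the two-argument ParallelFor above (no ISPC
// path), mirroring the fill example from the docstring; assumes an Open3D
// build where this header is available as "open3d/core/ParallelFor.h".
#include <cstdint>
#include <vector>
#include "open3d/core/ParallelFor.h"

void FillExample() {
    std::vector<float> v(1000);
    float fill_value = 42.0f;
    open3d::core::ParallelFor(
            open3d::core::Device("CPU:0"), static_cast<int64_t>(v.size()),
            [&](int64_t idx) { v[idx] = fill_value; });
}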
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <cstdint> #include <type_traits> #include "open3d/core/Device.h" #include "open3d/utility/Logging.h" #include "open3d/utility/Overload.h" #include "open3d/utility/Parallel.h" #include "open3d/utility/Preprocessor.h" #ifdef __CUDACC__ #include <cuda.h> #include <cuda_runtime.h> #include "open3d/core/CUDAUtils.h" #endif namespace open3d { namespace core { #ifdef __CUDACC__ static constexpr int64_t OPEN3D_PARFOR_BLOCK = 128; static constexpr int64_t OPEN3D_PARFOR_THREAD = 4; /// Calls f(n) with the "grid-stride loops" pattern. template <int64_t block_size, int64_t thread_size, typename func_t> __global__ void ElementWiseKernel_(int64_t n, func_t f) { int64_t items_per_block = block_size * thread_size; int64_t idx = blockIdx.x * items_per_block + threadIdx.x; #pragma unroll for (int64_t i = 0; i < thread_size; ++i) { if (idx < n) { f(idx); idx += block_size; } } } /// Run a function in parallel on CUDA. template <typename func_t> void ParallelForCUDA_(const Device& device, int64_t n, const func_t& func) { if (device.GetType() != Device::DeviceType::CUDA) { utility::LogError("ParallelFor for CUDA cannot run on device {}.", device.ToString()); } if (n == 0) { return; } CUDAScopedDevice scoped_device(device); int64_t items_per_block = OPEN3D_PARFOR_BLOCK * OPEN3D_PARFOR_THREAD; int64_t grid_size = (n + items_per_block - 1) / items_per_block; ElementWiseKernel_<OPEN3D_PARFOR_BLOCK, OPEN3D_PARFOR_THREAD> <<<grid_size, OPEN3D_PARFOR_BLOCK, 0, core::cuda::GetStream()>>>( n, func); OPEN3D_GET_LAST_CUDA_ERROR("ParallelFor failed."); } #else /// Run a function in parallel on CPU. template <typename func_t> void ParallelForCPU_(const Device& device, int64_t n, const func_t& func) { if (device.GetType() != Device::DeviceType::CPU) { utility::LogError("ParallelFor for CPU cannot run on device {}.", device.ToString()); } if (n == 0) { return; } #pragma omp parallel for num_threads(utility::EstimateMaxThreads()) for (int64_t i = 0; i < n; ++i) { func(i); } } #endif /// Run a function in parallel on CPU or CUDA. /// /// \param device The device for the parallel for loop to run on. 
/// \param n The number of workloads. /// \param func The function to be executed in parallel. The function should /// take an int64_t workload index and returns void, i.e., `void func(int64_t)`. /// /// \note This is optimized for uniform work items, i.e. where each call to \p /// func takes the same time. /// \note If you use a lambda function, capture only the required variables /// instead of all to prevent accidental race conditions. If you want the /// kernel to be used on both CPU and CUDA, capture the variables by value. template <typename func_t> void ParallelFor(const Device& device, int64_t n, const func_t& func) { #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else ParallelForCPU_(device, n, func); #endif } /// Run a potentially vectorized function in parallel on CPU or CUDA. /// /// \param device The device for the parallel for loop to run on. /// \param n The number of workloads. /// \param func The function to be executed in parallel. The function should /// take an int64_t workload index and returns void, i.e., `void func(int64_t)`. /// \param vec_func The vectorized function to be executed in parallel. The /// function should be provided using the OPEN3D_VECTORIZED macro, e.g., /// `OPEN3D_VECTORIZED(MyISPCKernel, some_used_variable)`. /// /// \note This is optimized for uniform work items, i.e. where each call to \p /// func takes the same time. /// \note If you use a lambda function, capture only the required variables /// instead of all to prevent accidental race conditions. If you want the /// kernel to be used on both CPU and CUDA, capture the variables by value. /// /// Example: /// /// \code /// /* MyFile.cpp */ /// #ifdef BUILD_ISPC_MODULE /// #include "MyFile_ispc.h" /// #endif /// /// std::vector<float> v(1000); /// float fill_value = 42.0f; /// core::ParallelFor( /// core::Device("CPU:0"), /// v.size(), /// [&](int64_t idx) { v[idx] = fill_value; }, /// OPEN3D_VECTORIZED(MyFillKernel, v.data(), fill_value)); /// /// /* MyFile.ispc */ /// #include "open3d/core/ParallelFor.isph" /// /// static inline void MyFillFunction(int64_t idx, /// float* uniform v, /// uniform float fill_value) { /// v[idx] = fill_value; /// } /// /// OPEN3D_EXPORT_VECTORIZED(MyFillKernel, /// MyFillFunction, /// float* uniform, /// uniform float) /// \endcode template <typename vec_func_t, typename func_t> void ParallelFor(const Device& device, int64_t n, const func_t& func, const vec_func_t& vec_func) { #ifdef BUILD_ISPC_MODULE #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else int num_threads = utility::EstimateMaxThreads(); ParallelForCPU_(device, num_threads, [&](int64_t i) { int64_t start = n * i / num_threads; int64_t end = std::min<int64_t>(n * (i + 1) / num_threads, n); vec_func(start, end); }); #endif #else #ifdef __CUDACC__ ParallelForCUDA_(device, n, func); #else ParallelForCPU_(device, n, func); #endif #endif } #ifdef BUILD_ISPC_MODULE // Internal helper macro. #define OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, ...) \ using namespace ispc; \ ISPCKernel(start, end, __VA_ARGS__); #else // Internal helper macro. #define OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, ...) \ utility::LogError( \ "ISPC module disabled. Unable to call vectorized kernel {}", \ OPEN3D_STRINGIFY(ISPCKernel)); #endif /// Internal helper macro. #define OPEN3D_OVERLOADED_LAMBDA_(T, ISPCKernel, ...) \ [&](T, int64_t start, int64_t end) { \ OPEN3D_CALL_ISPC_KERNEL_( \ OPEN3D_CONCAT(ISPCKernel, OPEN3D_CONCAT(_, T)), start, end, \ __VA_ARGS__); \ } /// OPEN3D_VECTORIZED(ISPCKernel, ...) 
/// /// Defines a lambda function to call the provided kernel. /// /// Use the OPEN3D_EXPORT_TEMPLATE_VECTORIZED macro to define the /// kernel in the ISPC source file. /// /// Note: The arguments to the kernel only have to exist if ISPC support is /// enabled via BUILD_ISPC_MODULE=ON. #define OPEN3D_VECTORIZED(ISPCKernel, ...) \ [&](int64_t start, int64_t end) { \ OPEN3D_CALL_ISPC_KERNEL_(ISPCKernel, start, end, __VA_ARGS__); \ } /// OPEN3D_TEMPLATE_VECTORIZED(T, ISPCKernel, ...) /// /// Defines a lambda function to call the provided template-like kernel. /// Supported types: /// - bool /// - unsigned + signed {8,16,32,64} bit integers, /// - float, double /// /// Use the OPEN3D_EXPORT_TEMPLATE_VECTORIZED macro to define the /// kernel in the ISPC source file. /// /// Note: The arguments to the kernel only have to exist if ISPC support is /// enabled via BUILD_ISPC_MODULE=ON. #define OPEN3D_TEMPLATE_VECTORIZED(T, ISPCKernel, ...) \ [&](int64_t start, int64_t end) { \ static_assert(std::is_arithmetic<T>::value, \ "Data type is not an arithmetic type"); \ utility::Overload( \ OPEN3D_OVERLOADED_LAMBDA_(bool, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint8_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int8_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint16_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int16_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint32_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int32_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(uint64_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(int64_t, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(float, ISPCKernel, __VA_ARGS__), \ OPEN3D_OVERLOADED_LAMBDA_(double, ISPCKernel, __VA_ARGS__), \ [&](auto&& generic, int64_t start, int64_t end) { \ utility::LogError( \ "Unsupported data type {} for calling " \ "vectorized kernel {}", \ typeid(generic).name(), \ OPEN3D_STRINGIFY(ISPCKernel)); \ })(T{}, start, end); \ } } // namespace core } // namespace open3d
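// How the vectorized overload above distributes work on CPU: the index space
// [0, n) is split into one contiguous [start, end) chunk per thread and
// vec_func is called once per chunk. A standalone sketch of that arithmetic;
// for n = 10 and num_threads = 4 it yields [0,2), [2,5), [5,7), [7,10).
#include <algorithm>
#include <cstdint>

void ChunkBounds(int64_t n, int64_t num_threads,
                 int64_t thread_id, int64_t* start, int64_t* end) {
    *start = n * thread_id / num_threads;
    *end = std::min<int64_t>(n * (thread_id + 1) / num_threads, n);
}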
matrix_vector_functions_intel_mkl_ext.c
#include <stdio.h> #include "matrix_vector_functions_intel_mkl.h" #include "matrix_vector_functions_intel_mkl_ext.h" #include <math.h> #include "mkl.h" /* C = beta*C + alpha*A(1:Anrows, 1:Ancols)[T]*B(1:Bnrows, 1:Bncols)[T] */ void submatrix_submatrix_mult_with_ab(mat *A, mat *B, mat *C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb, double alpha, double beta) { int opAnrows, opAncols, opBnrows, opBncols; if (transa == CblasTrans) { opAnrows = Ancols; opAncols = Anrows; } else { opAnrows = Anrows; opAncols = Ancols; } if (transb == CblasTrans) { opBnrows = Bncols; opBncols = Bnrows; } else { opBnrows = Bnrows; opBncols = Bncols; } if (opAncols != opBnrows) { printf("error in submatrix_submatrix_mult()"); exit(0); } cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE)transa, (CBLAS_TRANSPOSE)transb, opAnrows, opBncols, // m, n, opAncols, // k alpha, A->d, A->nrows, // 1, A, rows of A as declared in memory B->d, B->nrows, // B, rows of B as declared in memory beta, C->d, C->nrows // 0, C, rows of C as declared. ); } void submatrix_submatrix_mult(mat *A, mat *B, mat *C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb) { double alpha, beta; alpha = 1.0; beta = 0.0; submatrix_submatrix_mult_with_ab(A, B, C, Anrows, Ancols, Bnrows, Bncols, transa, transb, alpha, beta); } /* D = M(:,inds)' */ void matrix_get_selected_columns_and_transpose(mat *M, int *inds, mat *Mc) { int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->nrows); i++){ col_vec = vector_new(M->nrows); matrix_get_col(M,inds[i],col_vec); matrix_set_row(Mc,i,col_vec); vector_delete(col_vec); } } } void matrix_set_selected_rows_with_transposed(mat *M, int *inds, mat *Mc) { int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->ncols); i++){ col_vec = vector_new(Mc->nrows); matrix_get_col(Mc,i,col_vec); matrix_set_row(M,inds[i],col_vec); vector_delete(col_vec); } } } void linear_solve_UTxb(mat *A, mat *b) { LAPACKE_dtrtrs(LAPACK_COL_MAJOR, 'U', 'T', 'N', // A->nrows, b->ncols, A->d, A->nrows, b->d, b->nrows ); } mat_coo* coo_matrix_new(int nrows, int ncols, int capacity) { mat_coo *M = (mat_coo*)malloc(sizeof(mat_coo)); M->values = (double*)calloc(capacity, sizeof(double)); M->rows = (int*)calloc(capacity, sizeof(int)); M->cols = (int*)calloc(capacity, sizeof(int)); M->nnz = 0; M->nrows = nrows; M->ncols = ncols; M->capacity = capacity; return M; } void coo_matrix_delete(mat_coo *M) { free(M->values); free(M->cols); free(M->rows); free(M); } void coo_matrix_print(mat_coo *M) { int i; for (i = 0; i < M->nnz; i++) { printf("(%d, %d: %f), ", *(M->rows+i), *(M->cols+i), *(M->values+i)); } printf("\n"); } // 0-based interface void set_coo_matrix_element(mat_coo *M, int row, int col, double val, int force_new) { if (!(row >= 0 && row < M->nrows && col >=0 && col < M->ncols)) { printf("error: wrong index\n"); exit(0); } if (!force_new) { int i; for (i = 0; i < M->nnz; i++) { if (*(M->rows + i) == row+1 && *(M->cols + i) == col+1) { *(M->values + i) = val; return; } } } if (M->nnz < M->capacity) { *(M->rows+M->nnz) = row+1; *(M->cols+M->nnz) = col+1; *(M->values+M->nnz) = val; M->nnz = M->nnz+1; return; } printf("error: capacity exceeded. 
capacity=%d, nnz=%d\n", M->capacity, M->nnz); exit(0); } void coo_matrix_matrix_mult(mat_coo *A, mat *B, mat *C) { /* void mkl_dcoomm ( const char *transa , const MKL_INT *m , const MKL_INT *n , const MKL_INT *k , const double *alpha , const char *matdescra , const double *val , const MKL_INT *rowind , const MKL_INT *colind , const MKL_INT *nnz , const double *b , const MKL_INT *ldb , const double *beta , double *c , const MKL_INT *ldc ); */ double alpha = 1.0, beta = 0.0; const char *trans = "N"; const char *metadescra = "GXXF"; mkl_dcoomm( trans, &(A->nrows), &(C->ncols), &(A->ncols), &(alpha), metadescra, A->values, A->rows, A->cols, &(A->nnz), B->d, &(B->nrows), &(beta), C->d, &(C->nrows)); } void coo_matrix_transpose_matrix_mult(mat_coo *A, mat *B, mat *C) { /* void mkl_dcoomm ( const char *transa , const MKL_INT *m , const MKL_INT *n , const MKL_INT *k , const double *alpha , const char *matdescra , const double *val , const MKL_INT *rowind , const MKL_INT *colind , const MKL_INT *nnz , const double *b , const MKL_INT *ldb , const double *beta , double *c , const MKL_INT *ldc ); */ double alpha = 1.0, beta = 0.0; const char *trans = "T"; const char *metadescra = "GXXF"; mkl_dcoomm( trans, &(A->nrows), &(C->ncols), &(A->ncols), &(alpha), metadescra, A->values, A->rows, A->cols, &(A->nnz), B->d, &(B->nrows), &(beta), C->d, &(C->nrows)); } void coo_matrix_copy_to_dense(mat_coo *A, mat *B) { int i, j; // printf("z1\n"); for (i = 0; i < B->nrows; i++) { for (j = 0; j < B->ncols; j++) { matrix_set_element(B, i, j, 0.0); } } // printf("z2\n"); for (i = 0; i < A->nnz; i++) { matrix_set_element(B, *(A->rows+i)-1, *(A->cols+i)-1, *(A->values+i) ); } // printf("z3\n"); } double get_rand_uniform(VSLStreamStatePtr stream) { double ans; vdRngUniform( VSL_RNG_METHOD_UNIFORM_STD , stream, 1, &ans, 0.0, 1.0); return ans; } double get_rand_normal(VSLStreamStatePtr stream) { double ans; vdRngUniform( VSL_RNG_METHOD_UNIFORM_STD , stream, 1, &ans, 0.0, 1.0); return ans; } void gen_rand_coo_matrix(mat_coo *M, double density) { VSLStreamStatePtr stream_u; VSLStreamStatePtr stream_n; // vslNewStream( &stream_u, BRNG, time(NULL)); // vslNewStream( &stream_n, BRNG, time(NULL)); vslNewStream( &stream_u, BRNG, 123); vslNewStream( &stream_n, BRNG, 456); int i, j; for (i = 0; i < M->nrows; i++) { for (j = 0; j < M->ncols; j++) { if (get_rand_uniform(stream_u) < density) { set_coo_matrix_element(M, i, j, get_rand_normal(stream_n), 1); } } } } void coo_matrix_sort_element(mat_coo *A) { int i, j; // seletion sort for (i = 0; i < A->nnz; i++) { for (j = i+1; j < A->nnz; j++) { if ( (A->rows[i] > A->rows[j]) || (A->rows[i] == A->rows[j] && A->cols[i] > A->cols[j]) ) { double dtemp; int itemp; itemp = A->rows[i]; A->rows[i] = A->rows[j]; A->rows[j] = itemp; itemp = A->cols[i]; A->cols[i] = A->cols[j]; A->cols[j] = itemp; dtemp = A->values[i]; A->values[i] = A->values[j]; A->values[j] = dtemp; } } } } void csr_matrix_delete(mat_csr *M) { free(M->values); free(M->cols); free(M->pointerB); free(M->pointerE); free(M); } void csr_matrix_print(mat_csr *M) { int i; printf("values: "); for (i = 0; i < M->nnz; i++) { printf("%f ", M->values[i]); } printf("\ncolumns: "); for (i = 0; i < M->nnz; i++) { printf("%d ", M->cols[i]); } printf("\npointerB: "); for (i = 0; i < M->nrows; i++) { printf("%d\t", M->pointerB[i]); } printf("\npointerE: "); for (i = 0; i < M->nrows; i++) { printf("%d\t", M->pointerE[i]); } printf("\n"); } mat_csr* csr_matrix_new() { mat_csr *M = (mat_csr*)malloc(sizeof(mat_csr)); return M; } void 
csr_init_from_coo(mat_csr *D, mat_coo *M) { D->nrows = M->nrows; D->ncols = M->ncols; D->pointerB = (int*)malloc(D->nrows*sizeof(int)); D->pointerE = (int*)malloc(D->nrows*sizeof(int)); D->cols = (int*)calloc(M->nnz, sizeof(int)); D->nnz = M->nnz; // coo_matrix_sort_element(M); D->values = (double*)malloc(M->nnz * sizeof(double)); memcpy(D->values, M->values, M->nnz * sizeof(double)); int current_row, cursor=0; for (current_row = 0; current_row < D->nrows; current_row++) { D->pointerB[current_row] = cursor+1; while (M->rows[cursor]-1 == current_row) { D->cols[cursor] = M->cols[cursor]; cursor++; } D->pointerE[current_row] = cursor+1; } } void csr_matrix_matrix_mult(mat_csr *A, mat *B, mat *C) { /* void mkl_dcsrmm ( const char *transa , const MKL_INT *m , const MKL_INT *n , const MKL_INT *k , const double *alpha , const char *matdescra , const double *val , const MKL_INT *indx , const MKL_INT *pntrb , const MKL_INT *pntre , const double *b , const MKL_INT *ldb , const double *beta , double *c , const MKL_INT *ldc ); */ char * transa = "N"; double alpha = 1.0, beta = 0.0; const char *matdescra = "GXXF"; mkl_dcsrmm(transa, &(A->nrows), &(C->ncols), &(A->ncols), &alpha, matdescra, A->values, A->cols, A->pointerB, A->pointerE, B->d, &(B->nrows), &beta, C->d, &(C->nrows)); } void csr_matrix_transpose_matrix_mult(mat_csr *A, mat *B, mat *C) { /* void mkl_dcsrmm ( const char *transa , const MKL_INT *m , const MKL_INT *n , const MKL_INT *k , const double *alpha , const char *matdescra , const double *val , const MKL_INT *indx , const MKL_INT *pntrb , const MKL_INT *pntre , const double *b , const MKL_INT *ldb , const double *beta , double *c , const MKL_INT *ldc ); */ char * transa = "T"; double alpha = 1.0, beta = 0.0; const char *matdescra = "GXXF"; mkl_dcsrmm(transa, &(A->nrows), &(C->ncols), &(A->ncols), &alpha, matdescra, A->values, A->cols, A->pointerB, A->pointerE, B->d, &(B->nrows), &beta, C->d, &(C->nrows)); }
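// In the OpenMP'd column-selection helpers above, "#pragma omp parallel for"
// is nested inside an enclosing "#pragma omp parallel", which starts a second
// team per outer thread and leaves col_vec shared inside it -- the iterations
// are both duplicated and racy. A minimal corrected sketch using one combined
// worksharing construct (the function name here is illustrative):
void matrix_get_selected_columns_and_transpose_fixed(mat *M, int *inds, mat *Mc) {
    int i;
#pragma omp parallel for
    for (i = 0; i < Mc->nrows; i++) {
        vec *col_vec = vector_new(M->nrows); /* block-local, so private per thread */
        matrix_get_col(M, inds[i], col_vec);
        matrix_set_row(Mc, i, col_vec);      /* each iteration writes a distinct row */
        vector_delete(col_vec);
    }
}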
#include <stdio.h> #include "matrix_vector_functions_intel_mkl.h" #include "matrix_vector_functions_intel_mkl_ext.h" #include <math.h> #include "mkl.h" /* C = beta*C + alpha*A(1:Anrows, 1:Ancols)[T]*B(1:Bnrows, 1:Bncols)[T] */ void submatrix_submatrix_mult_with_ab(mat * A, mat * B, mat * C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb, double alpha, double beta) { int opAnrows, opAncols, opBnrows, opBncols; if (transa == CblasTrans) { opAnrows = Ancols; opAncols = Anrows; } else { opAnrows = Anrows; opAncols = Ancols; } if (transb == CblasTrans) { opBnrows = Bncols; opBncols = Bnrows; } else { opBnrows = Bnrows; opBncols = Bncols; } if (opAncols != opBnrows) { printf("error in submatrix_submatrix_mult()"); exit(0); } cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE) transa, (CBLAS_TRANSPOSE) transb, opAnrows, opBncols, //m, n, opAncols, //k alpha, A->d, A->nrows, //1, A, rows of A as declared in memory B->d, B->nrows, //B, rows of B as declared in memory beta, C->d, C->nrows // 0, C, rows of C as declared. ); } void submatrix_submatrix_mult(mat * A, mat * B, mat * C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb) { double alpha, beta; alpha = 1.0; beta = 0.0; submatrix_submatrix_mult_with_ab(A, B, C, Anrows, Ancols, Bnrows, Bncols, transa, transb, alpha, beta); } /* D = M(:,inds)' */ void matrix_get_selected_columns_and_transpose(mat * M, int *inds, mat * Mc) { int i; vec *col_vec; for (i = 0; i < (Mc->nrows); i++) { col_vec = vector_new(M->nrows); matrix_get_col(M, inds[i], col_vec); matrix_set_row(Mc, i, col_vec); vector_delete(col_vec); } } void matrix_set_selected_rows_with_transposed(mat * M, int *inds, mat * Mc) { int i; vec *col_vec; for (i = 0; i < (Mc->ncols); i++) { col_vec = vector_new(Mc->nrows); matrix_get_col(Mc, i, col_vec); matrix_set_row(M, inds[i], col_vec); vector_delete(col_vec); } } void linear_solve_UTxb(mat * A, mat * b) { LAPACKE_dtrtrs(LAPACK_COL_MAJOR, 'U', 'T', 'N', // A->nrows, b->ncols, A->d, A->nrows, b->d, b->nrows ); } mat_coo * coo_matrix_new(int nrows, int ncols, int capacity) { mat_coo *M = (mat_coo *) malloc(sizeof(mat_coo)); M->values = (double *)calloc(capacity, sizeof(double)); M->rows = (int *)calloc(capacity, sizeof(int)); M->cols = (int *)calloc(capacity, sizeof(int)); M->nnz = 0; M->nrows = nrows; M->ncols = ncols; M->capacity = capacity; return M; } void coo_matrix_delete(mat_coo * M) { free(M->values); free(M->cols); free(M->rows); free(M); } void coo_matrix_print(mat_coo * M) { int i; for (i = 0; i < M->nnz; i++) { printf("(%d, %d: %f), ", *(M->rows + i), *(M->cols + i), *(M->values + i)); } printf("\n"); } //0 - based interface void set_coo_matrix_element(mat_coo * M, int row, int col, double val, int force_new) { if (!(row >= 0 && row < M->nrows && col >= 0 && col < M->ncols)) { printf("error: wrong index\n"); exit(0); } if (!force_new) { int i; for (i = 0; i < M->nnz; i++) { if (*(M->rows + i) == row + 1 && *(M->cols + i) == col + 1) { *(M->values + i) = val; return; } } } if (M->nnz < M->capacity) { *(M->rows + M->nnz) = row + 1; *(M->cols + M->nnz) = col + 1; *(M->values + M->nnz) = val; M->nnz = M->nnz + 1; return; } printf("error: capacity exceeded. 
capacity=%d, nnz=%d\n", M->capacity, M->nnz); exit(0); } void coo_matrix_matrix_mult(mat_coo * A, mat * B, mat * C) { /* * void mkl_dcoomm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *rowind , const MKL_INT * *colind , const MKL_INT *nnz , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ double alpha = 1.0, beta = 0.0; const char *trans = "N"; const char *metadescra = "GXXF"; mkl_dcoomm( trans, &(A->nrows), &(C->ncols), &(A->ncols), &(alpha), metadescra, A->values, A->rows, A->cols, &(A->nnz), B->d, &(B->nrows), &(beta), C->d, &(C->nrows)); } void coo_matrix_transpose_matrix_mult(mat_coo * A, mat * B, mat * C) { /* * void mkl_dcoomm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *rowind , const MKL_INT * *colind , const MKL_INT *nnz , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ double alpha = 1.0, beta = 0.0; const char *trans = "T"; const char *metadescra = "GXXF"; mkl_dcoomm( trans, &(A->nrows), &(C->ncols), &(A->ncols), &(alpha), metadescra, A->values, A->rows, A->cols, &(A->nnz), B->d, &(B->nrows), &(beta), C->d, &(C->nrows)); } void coo_matrix_copy_to_dense(mat_coo * A, mat * B) { int i, j; //printf("z1\n"); for (i = 0; i < B->nrows; i++) { for (j = 0; j < B->ncols; j++) { matrix_set_element(B, i, j, 0.0); } } //printf("z2\n"); for (i = 0; i < A->nnz; i++) { matrix_set_element(B, *(A->rows + i) - 1, *(A->cols + i) - 1, *(A->values + i)); } //printf("z3\n"); } double get_rand_uniform(VSLStreamStatePtr stream) { double ans; vdRngUniform(VSL_RNG_METHOD_UNIFORM_STD, stream, 1, &ans, 0.0, 1.0); return ans; } double get_rand_normal(VSLStreamStatePtr stream) { double ans; vdRngUniform(VSL_RNG_METHOD_UNIFORM_STD, stream, 1, &ans, 0.0, 1.0); return ans; } void gen_rand_coo_matrix(mat_coo * M, double density) { VSLStreamStatePtr stream_u; VSLStreamStatePtr stream_n; //vslNewStream(&stream_u, BRNG, time(NULL)); //vslNewStream(&stream_n, BRNG, time(NULL)); vslNewStream(&stream_u, BRNG, 123); vslNewStream(&stream_n, BRNG, 456); int i, j; for (i = 0; i < M->nrows; i++) { for (j = 0; j < M->ncols; j++) { if (get_rand_uniform(stream_u) < density) { set_coo_matrix_element(M, i, j, get_rand_normal(stream_n), 1); } } } } void coo_matrix_sort_element(mat_coo * A) { int i, j; //seletion sort for (i = 0; i < A->nnz; i++) { for (j = i + 1; j < A->nnz; j++) { if ((A->rows[i] > A->rows[j]) || (A->rows[i] == A->rows[j] && A->cols[i] > A->cols[j])) { double dtemp; int itemp; itemp = A->rows[i]; A->rows[i] = A->rows[j]; A->rows[j] = itemp; itemp = A->cols[i]; A->cols[i] = A->cols[j]; A->cols[j] = itemp; dtemp = A->values[i]; A->values[i] = A->values[j]; A->values[j] = dtemp; } } } } void csr_matrix_delete(mat_csr * M) { free(M->values); free(M->cols); free(M->pointerB); free(M->pointerE); free(M); } void csr_matrix_print(mat_csr * M) { int i; printf("values: "); for (i = 0; i < M->nnz; i++) { printf("%f ", M->values[i]); } printf("\ncolumns: "); for (i = 0; i < M->nnz; i++) { printf("%d ", M->cols[i]); } printf("\npointerB: "); for (i = 0; i < M->nrows; i++) { printf("%d\t", M->pointerB[i]); } printf("\npointerE: "); for (i = 0; i < M->nrows; i++) { printf("%d\t", M->pointerE[i]); } printf("\n"); } mat_csr * csr_matrix_new() { mat_csr *M = (mat_csr *) malloc(sizeof(mat_csr)); 
return M; } void csr_init_from_coo(mat_csr * D, mat_coo * M) { D->nrows = M->nrows; D->ncols = M->ncols; D->pointerB = (int *)malloc(D->nrows * sizeof(int)); D->pointerE = (int *)malloc(D->nrows * sizeof(int)); D->cols = (int *)calloc(M->nnz, sizeof(int)); D->nnz = M->nnz; //coo_matrix_sort_element(M); D->values = (double *)malloc(M->nnz * sizeof(double)); memcpy(D->values, M->values, M->nnz * sizeof(double)); int current_row, cursor = 0; for (current_row = 0; current_row < D->nrows; current_row++) { D->pointerB[current_row] = cursor + 1; while (M->rows[cursor] - 1 == current_row) { D->cols[cursor] = M->cols[cursor]; cursor++; } D->pointerE[current_row] = cursor + 1; } } void csr_matrix_matrix_mult(mat_csr * A, mat * B, mat * C) { /* * void mkl_dcsrmm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *indx , const MKL_INT * *pntrb , const MKL_INT *pntre , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ char *transa = "N"; double alpha = 1.0, beta = 0.0; const char *matdescra = "GXXF"; mkl_dcsrmm(transa, &(A->nrows), &(C->ncols), &(A->ncols), &alpha, matdescra, A->values, A->cols, A->pointerB, A->pointerE, B->d, &(B->nrows), &beta, C->d, &(C->nrows)); } void csr_matrix_transpose_matrix_mult(mat_csr * A, mat * B, mat * C) { /* * void mkl_dcsrmm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *indx , const MKL_INT * *pntrb , const MKL_INT *pntre , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ char *transa = "T"; double alpha = 1.0, beta = 0.0; const char *matdescra = "GXXF"; mkl_dcsrmm(transa, &(A->nrows), &(C->ncols), &(A->ncols), &alpha, matdescra, A->values, A->cols, A->pointerB, A->pointerE, B->d, &(B->nrows), &beta, C->d, &(C->nrows)); }
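// A hedged usage sketch of the COO -> CSR path above. csr_init_from_coo
// walks rows strictly in order, so it assumes the COO entries are already
// sorted by (row, col); the coo_matrix_sort_element call there is commented
// out, so sort explicitly first. pointerB/pointerE are built 1-based to
// match the one-based, Fortran-style "GXXF" matdescra passed to mkl_dcsrmm.
void coo_to_csr_example(void) {
    mat_coo *A = coo_matrix_new(100, 100, 500); /* 100x100, room for 500 nnz */
    gen_rand_coo_matrix(A, 0.05);               /* ~5% density */
    coo_matrix_sort_element(A);                 /* required before conversion */
    mat_csr *Acsr = csr_matrix_new();
    csr_init_from_coo(Acsr, A);
    csr_matrix_delete(Acsr);
    coo_matrix_delete(A);
}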
#include <stdio.h> #include "matrix_vector_functions_intel_mkl.h" #include "matrix_vector_functions_intel_mkl_ext.h" #include <math.h> #include "mkl.h" /* C = beta*C + alpha*A(1:Anrows, 1:Ancols)[T]*B(1:Bnrows, 1:Bncols)[T] */ void submatrix_submatrix_mult_with_ab(mat * A, mat * B, mat * C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb, double alpha, double beta) { int opAnrows, opAncols, opBnrows, opBncols; if (transa == CblasTrans) { opAnrows = Ancols; opAncols = Anrows; } else { opAnrows = Anrows; opAncols = Ancols; } if (transb == CblasTrans) { opBnrows = Bncols; opBncols = Bnrows; } else { opBnrows = Bnrows; opBncols = Bncols; } if (opAncols != opBnrows) { printf("error in submatrix_submatrix_mult()"); exit(0); } cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE) transa, (CBLAS_TRANSPOSE) transb, opAnrows, opBncols, //m, n, opAncols, //k alpha, A->d, A->nrows, //1, A, rows of A as declared in memory B->d, B->nrows, //B, rows of B as declared in memory beta, C->d, C->nrows // 0, C, rows of C as declared. ); } void submatrix_submatrix_mult(mat * A, mat * B, mat * C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb) { double alpha, beta; alpha = 1.0; beta = 0.0; submatrix_submatrix_mult_with_ab(A, B, C, Anrows, Ancols, Bnrows, Bncols, transa, transb, alpha, beta); } /* D = M(:,inds)' */ void matrix_get_selected_columns_and_transpose(mat * M, int *inds, mat * Mc) { int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for (i = 0; i < (Mc->nrows); i++) { col_vec = vector_new(M->nrows); matrix_get_col(M, inds[i], col_vec); matrix_set_row(Mc, i, col_vec); vector_delete(col_vec); } } } void matrix_set_selected_rows_with_transposed(mat * M, int *inds, mat * Mc) { int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for (i = 0; i < (Mc->ncols); i++) { col_vec = vector_new(Mc->nrows); matrix_get_col(Mc, i, col_vec); matrix_set_row(M, inds[i], col_vec); vector_delete(col_vec); } } } void linear_solve_UTxb(mat * A, mat * b) { LAPACKE_dtrtrs(LAPACK_COL_MAJOR, 'U', 'T', 'N', // A->nrows, b->ncols, A->d, A->nrows, b->d, b->nrows ); } mat_coo * coo_matrix_new(int nrows, int ncols, int capacity) { mat_coo *M = (mat_coo *) malloc(sizeof(mat_coo)); M->values = (double *)calloc(capacity, sizeof(double)); M->rows = (int *)calloc(capacity, sizeof(int)); M->cols = (int *)calloc(capacity, sizeof(int)); M->nnz = 0; M->nrows = nrows; M->ncols = ncols; M->capacity = capacity; return M; } void coo_matrix_delete(mat_coo * M) { free(M->values); free(M->cols); free(M->rows); free(M); } void coo_matrix_print(mat_coo * M) { int i; for (i = 0; i < M->nnz; i++) { printf("(%d, %d: %f), ", *(M->rows + i), *(M->cols + i), *(M->values + i)); } printf("\n"); } //0 - based interface void set_coo_matrix_element(mat_coo * M, int row, int col, double val, int force_new) { if (!(row >= 0 && row < M->nrows && col >= 0 && col < M->ncols)) { printf("error: wrong index\n"); exit(0); } if (!force_new) { int i; for (i = 0; i < M->nnz; i++) { if (*(M->rows + i) == row + 1 && *(M->cols + i) == col + 1) { *(M->values + i) = val; return; } } } if (M->nnz < M->capacity) { *(M->rows + M->nnz) = row + 1; *(M->cols + M->nnz) = col + 1; *(M->values + M->nnz) = val; M->nnz = M->nnz + 1; return; } printf("error: capacity exceeded. 
capacity=%d, nnz=%d\n", M->capacity, M->nnz); exit(0); } void coo_matrix_matrix_mult(mat_coo * A, mat * B, mat * C) { /* * void mkl_dcoomm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *rowind , const MKL_INT * *colind , const MKL_INT *nnz , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ double alpha = 1.0, beta = 0.0; const char *trans = "N"; const char *metadescra = "GXXF"; mkl_dcoomm( trans, &(A->nrows), &(C->ncols), &(A->ncols), &(alpha), metadescra, A->values, A->rows, A->cols, &(A->nnz), B->d, &(B->nrows), &(beta), C->d, &(C->nrows)); } void coo_matrix_transpose_matrix_mult(mat_coo * A, mat * B, mat * C) { /* * void mkl_dcoomm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *rowind , const MKL_INT * *colind , const MKL_INT *nnz , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ double alpha = 1.0, beta = 0.0; const char *trans = "T"; const char *metadescra = "GXXF"; mkl_dcoomm( trans, &(A->nrows), &(C->ncols), &(A->ncols), &(alpha), metadescra, A->values, A->rows, A->cols, &(A->nnz), B->d, &(B->nrows), &(beta), C->d, &(C->nrows)); } void coo_matrix_copy_to_dense(mat_coo * A, mat * B) { int i, j; //printf("z1\n"); for (i = 0; i < B->nrows; i++) { for (j = 0; j < B->ncols; j++) { matrix_set_element(B, i, j, 0.0); } } //printf("z2\n"); for (i = 0; i < A->nnz; i++) { matrix_set_element(B, *(A->rows + i) - 1, *(A->cols + i) - 1, *(A->values + i)); } //printf("z3\n"); } double get_rand_uniform(VSLStreamStatePtr stream) { double ans; vdRngUniform(VSL_RNG_METHOD_UNIFORM_STD, stream, 1, &ans, 0.0, 1.0); return ans; } double get_rand_normal(VSLStreamStatePtr stream) { double ans; vdRngUniform(VSL_RNG_METHOD_UNIFORM_STD, stream, 1, &ans, 0.0, 1.0); return ans; } void gen_rand_coo_matrix(mat_coo * M, double density) { VSLStreamStatePtr stream_u; VSLStreamStatePtr stream_n; //vslNewStream(&stream_u, BRNG, time(NULL)); //vslNewStream(&stream_n, BRNG, time(NULL)); vslNewStream(&stream_u, BRNG, 123); vslNewStream(&stream_n, BRNG, 456); int i, j; for (i = 0; i < M->nrows; i++) { for (j = 0; j < M->ncols; j++) { if (get_rand_uniform(stream_u) < density) { set_coo_matrix_element(M, i, j, get_rand_normal(stream_n), 1); } } } } void coo_matrix_sort_element(mat_coo * A) { int i, j; //seletion sort for (i = 0; i < A->nnz; i++) { for (j = i + 1; j < A->nnz; j++) { if ((A->rows[i] > A->rows[j]) || (A->rows[i] == A->rows[j] && A->cols[i] > A->cols[j])) { double dtemp; int itemp; itemp = A->rows[i]; A->rows[i] = A->rows[j]; A->rows[j] = itemp; itemp = A->cols[i]; A->cols[i] = A->cols[j]; A->cols[j] = itemp; dtemp = A->values[i]; A->values[i] = A->values[j]; A->values[j] = dtemp; } } } } void csr_matrix_delete(mat_csr * M) { free(M->values); free(M->cols); free(M->pointerB); free(M->pointerE); free(M); } void csr_matrix_print(mat_csr * M) { int i; printf("values: "); for (i = 0; i < M->nnz; i++) { printf("%f ", M->values[i]); } printf("\ncolumns: "); for (i = 0; i < M->nnz; i++) { printf("%d ", M->cols[i]); } printf("\npointerB: "); for (i = 0; i < M->nrows; i++) { printf("%d\t", M->pointerB[i]); } printf("\npointerE: "); for (i = 0; i < M->nrows; i++) { printf("%d\t", M->pointerE[i]); } printf("\n"); } mat_csr * csr_matrix_new() { mat_csr *M = (mat_csr *) malloc(sizeof(mat_csr)); 
return M; } void csr_init_from_coo(mat_csr * D, mat_coo * M) { D->nrows = M->nrows; D->ncols = M->ncols; D->pointerB = (int *)malloc(D->nrows * sizeof(int)); D->pointerE = (int *)malloc(D->nrows * sizeof(int)); D->cols = (int *)calloc(M->nnz, sizeof(int)); D->nnz = M->nnz; //coo_matrix_sort_element(M); D->values = (double *)malloc(M->nnz * sizeof(double)); memcpy(D->values, M->values, M->nnz * sizeof(double)); int current_row, cursor = 0; for (current_row = 0; current_row < D->nrows; current_row++) { D->pointerB[current_row] = cursor + 1; while (M->rows[cursor] - 1 == current_row) { D->cols[cursor] = M->cols[cursor]; cursor++; } D->pointerE[current_row] = cursor + 1; } } void csr_matrix_matrix_mult(mat_csr * A, mat * B, mat * C) { /* * void mkl_dcsrmm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *indx , const MKL_INT * *pntrb , const MKL_INT *pntre , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ char *transa = "N"; double alpha = 1.0, beta = 0.0; const char *matdescra = "GXXF"; mkl_dcsrmm(transa, &(A->nrows), &(C->ncols), &(A->ncols), &alpha, matdescra, A->values, A->cols, A->pointerB, A->pointerE, B->d, &(B->nrows), &beta, C->d, &(C->nrows)); } void csr_matrix_transpose_matrix_mult(mat_csr * A, mat * B, mat * C) { /* * void mkl_dcsrmm ( const char *transa , const MKL_INT *m , const * MKL_INT *n , const MKL_INT *k , const double *alpha , const char * *matdescra , const double *val , const MKL_INT *indx , const MKL_INT * *pntrb , const MKL_INT *pntre , const double *b , const MKL_INT *ldb , * const double *beta , double *c , const MKL_INT *ldc ); */ char *transa = "T"; double alpha = 1.0, beta = 0.0; const char *matdescra = "GXXF"; mkl_dcsrmm(transa, &(A->nrows), &(C->ncols), &(A->ncols), &alpha, matdescra, A->values, A->cols, A->pointerB, A->pointerE, B->d, &(B->nrows), &beta, C->d, &(C->nrows)); }
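// get_rand_normal above draws from vdRngUniform, so despite its name it
// returns uniform variates, not Gaussian ones. If normal deviates are
// intended, MKL's VSL provides vdRngGaussian; a minimal corrected sketch:
double get_rand_normal_gaussian(VSLStreamStatePtr stream) {
    double ans;
    vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 1, &ans,
                  0.0, 1.0); /* mean 0, sigma 1 */
    return ans;
}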
static_linking.c
// RUN: %libomptarget-compile-generic -DLIBRARY -c -o %t.o // RUN: llvm-ar rcs %t.a %t.o // RUN: %libomptarget-compile-generic %t.a && %libomptarget-run-generic 2>&1 | %fcheck-generic // REQUIRES: nvptx64-nvidia-cuda-oldDriver // REQUIRES: amdgcn-amd-amdhsa-oldDriver #ifdef LIBRARY int x = 42; #pragma omp declare target(x) int foo() { int value; #pragma omp target map(from : value) value = x; return value; } #else #include <stdio.h> int foo(); int main() { int x = foo(); // CHECK: PASS if (x == 42) printf("PASS\n"); } #endif
// RUN: %libomptarget-compile-generic -DLIBRARY -c -o %t.o // RUN: llvm-ar rcs %t.a %t.o // RUN: %libomptarget-compile-generic %t.a && %libomptarget-run-generic 2>&1 | %fcheck-generic // REQUIRES: nvptx64-nvidia-cuda-oldDriver // REQUIRES: amdgcn-amd-amdhsa-oldDriver #ifdef LIBRARY int x = 42; int foo() { int value; value = x; return value; } #else #include <stdio.h> int foo(); int main() { int x = foo(); // CHECK: PASS if (x == 42) printf("PASS\n"); } #endif
// RUN: %libomptarget-compile-generic -DLIBRARY -c -o %t.o // RUN: llvm-ar rcs %t.a %t.o // RUN: %libomptarget-compile-generic %t.a && %libomptarget-run-generic 2>&1 | %fcheck-generic // REQUIRES: nvptx64-nvidia-cuda-oldDriver // REQUIRES: amdgcn-amd-amdhsa-oldDriver #ifdef LIBRARY int x = 42; #pragma omp declare target(x) int foo() { int value; #pragma omp target map(from : value) value = x; return value; } #else #include <stdio.h> int foo(); int main() { int x = foo(); // CHECK: PASS if (x == 42) printf("PASS\n"); } #endif
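// A self-contained sketch of the offload pattern this test exercises: a
// global is placed in the device image with "declare target", read inside a
// target region, and copied back through map(from:). Assumes an OpenMP
// offloading toolchain (it degrades to host execution otherwise); the
// variable name is illustrative, not taken from the test.
#include <stdio.h>

int device_x = 42;
#pragma omp declare target(device_x)

int main(void) {
    int value = 0;
#pragma omp target map(from : value)
    value = device_x;
    printf("value = %d\n", value); /* prints 42 */
    return 0;
}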
GB_binop__rminus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_01__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp32) // A*D function (colscale): GB (_AxD__rminus_fp32) // D*A function (rowscale): GB (_DxB__rminus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp32) // C=scalar+B GB (_bind1st__rminus_fp32) // C=scalar+B' GB (_bind1st_tran__rminus_fp32) // C=A+scalar GB (_bind2nd__rminus_fp32) // C=A'+scalar GB (_bind2nd_tran__rminus_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
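// RMINUS ("reverse minus") as hard-coded throughout this file: GB_BINOP
// expands to z = (y - x), i.e. rminus(x, y) == y - x, the argument-flipped
// form of ordinary MINUS. A scalar sketch of the fp32 instance:
static inline float rminus_fp32(float x, float y) {
    return y - x; /* second operand minus the first */
}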
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_01__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp32) // A*D function (colscale): GB (_AxD__rminus_fp32) // D*A function (rowscale): GB (_DxB__rminus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp32) // C=scalar+B GB (_bind1st__rminus_fp32) // C=scalar+B' GB (_bind1st_tran__rminus_fp32) // C=A+scalar GB (_bind2nd__rminus_fp32) // C=A'+scalar GB (_bind2nd_tran__rminus_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
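//------------------------------------------------------------------------------
// worked example: what RMINUS computes (illustrative sketch, not generated)
//------------------------------------------------------------------------------

// A minimal standalone sketch, assuming only the definitions above:
// GB_BINOP(z,x,y,i,j) sets z = (y - x), so RMINUS is subtraction with its
// operands reversed.  bind1st fixes the first operand x (cij = bij - x) and
// bind2nd fixes the second operand y (cij = y - aij), matching
// GB (_bind1st__rminus_fp32) and GB (_bind2nd__rminus_fp32) above.  The
// helper and variable names here are hypothetical, not GraphBLAS API.

#include <stdio.h>

static float rminus_fp32 (float x, float y) { return (y - x) ; }

int main (void)
{
    float x = 2.0f ;
    float Bx [3] = { 1.0f, 5.0f, -3.0f } ;
    for (int p = 0 ; p < 3 ; p++)
    {
        // the bind1st case: Cx [p] = Bx [p] - x
        printf ("rminus (%g, %g) = %g\n", x, Bx [p], rminus_fp32 (x, Bx [p])) ;
    }
    return (0) ;    // prints -1, 3, and -5
}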
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_01__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp32) // A*D function (colscale): GB (_AxD__rminus_fp32) // D*A function (rowscale): GB (_DxB__rminus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp32) // C=scalar+B GB (_bind1st__rminus_fp32) // C=scalar+B' GB (_bind1st_tran__rminus_fp32) // C=A+scalar GB (_bind2nd__rminus_fp32) // C=A'+scalar GB (_bind2nd_tran__rminus_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rminus_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
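//------------------------------------------------------------------------------
// parallelization note (illustrative sketch, not generated)
//------------------------------------------------------------------------------

// The only difference between this copy and the preceding one is that the
// bind1st/bind2nd loops carry
//      #pragma omp parallel for num_threads(nthreads) schedule(static)
// Each iteration writes a single Cx [p] and reads only its own inputs plus a
// shared scalar, so there is no cross-iteration dependence.  A minimal sketch
// of the same pattern with a hypothetical name (compile with -fopenmp):

#include <stdint.h>

static void bind2nd_rminus_sketch (float *Cx, const float *Ax, float y,
    int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (y - Ax [p]) ;     // cij = y - aij, as in GB_BINOP
    }
}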
par_csr_matrix.c
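/* Reader's sketch (not part of the hypre source): a hypre_ParCSRMatrix gives
   each MPI rank a contiguous block of rows, split into two sequential CSR
   matrices. "diag" holds the entries whose columns the rank owns, stored with
   local column indices (global index minus first_col_diag); "offd" holds all
   other entries, with compressed local column indices that col_map_offd maps
   back to global column numbers. For example, with two ranks and a 4x4 matrix
   where rank 0 owns rows and columns 0-1 (row_starts = col_starts = {0,2} on
   rank 0), a rank-0 entry in column 1 lands in diag while an entry in column 3
   lands in offd, and col_map_offd lists the external columns that actually
   occur, e.g. {3}. GenerateDiagAndOffd below performs exactly this split. */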
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* In addition to the publicly accessible interface in HYPRE_mv.h, the implementation in this file uses accessor macros into the sequential matrix structure, and so includes the .h that defines that structure. Should those accessor functions become proper functions at some later date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* If create is called and row_starts and col_starts are NOT null, then it is assumed that they are of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix* hypre_ParCSRMatrixCreate( MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt *row_starts, HYPRE_BigInt *col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd ) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_BigInt first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); if (!row_starts) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); } } /* row_starts[0] is start of local rows.
row_starts[1] is start of next processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1]-first_row_index ; first_col_diag = col_starts[0]; local_num_cols = col_starts[1]-first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* We could make these null instead of leaving the range. If that change is made, then when this create is called from functions like the matrix-matrix multiply, be careful not to generate a new partition. */ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix ) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if ( hypre_ParCSRMatrixOwnsData(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if ( hypre_ParCSRMatrixDiagT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if ( hypre_ParCSRMatrixOffdT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) ) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST); } if ( 
hypre_ParCSRMatrixOwnsColStarts(matrix) ) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* RL: this is actually not correct since the memory_location may have been changed after allocation * put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) ) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if ( hypre_ParCSRMatrixProcOrdering(matrix) ) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) ); /* !!! 
S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix* hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location) ) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix *matrix, const char* format ) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/ hypre_CSRMatrixCheckSetNumNonzeros(diag); hypre_CSRMatrixCheckSetNumNonzeros(offd); if (format[0] == 'I') { HYPRE_BigInt total_num_nonzeros; HYPRE_BigInt local_num_nonzeros; local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd) ); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; } else if (format[0] == 'D') { HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd) ); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; } else { hypre_error_in_arg(1); return hypre_error_flag; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_data ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetRowStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_row_starts ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix, HYPRE_Int owns_col_starts ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead( MPI_Comm comm, const char *file_name ) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_BigInt global_num_rows, global_num_cols; HYPRE_Int num_cols_offd; HYPRE_Int local_num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_BigInt *col_map_offd; FILE *fp; HYPRE_Int equal = 1; HYPRE_BigInt row_s, row_e, col_s, 
col_e; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%b", &global_num_rows); hypre_fscanf(fp, "%b", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); /* the bgl input file should only contain the EXACT range for local processor */ hypre_fscanf(fp, "%b %b %b %b", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) { hypre_fscanf(fp, "%b", &col_map_offd[i]); } fclose(fp); for (i=1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows,0,0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) { hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; } else { hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix, const char *file_name ) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
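/* Illustrative note: with file_name "A", rank 0 writes A.D.0 (the diag CSR
   block), A.O.0 (the offd block, only when num_cols_offd != 0), and A.INFO.0
   (global sizes, num_cols_offd, the local row/column range, and the offd
   column map); these are the same per-rank files that hypre_ParCSRMatrixRead
   above opens and parses with the matching %b/%d formats. */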
hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i=0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0]+(HYPRE_BigInt)base_i; iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1; jlower = col_starts[0]+(HYPRE_BigInt)base_j; jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt)(i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i+1]; j++) { J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j); if ( diag_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if ( num_nonzeros_offd ) { for (j = offd_i[i]; j < offd_i[i+1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j; if ( 
offd_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J ); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_i_ptr, HYPRE_Int *base_j_ptr, hypre_ParCSRMatrix **matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int)row_starts[0]; base_j = (HYPRE_Int)col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag+(HYPRE_BigInt)num_cols-1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int)(I-big_base_i-first_row_index); J -= big_base_j; if (i2 > row_cnt) { diag_i[i2] 
= diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_nonzeros_offd; i++) aux_offd_j[i] = (HYPRE_BigInt)offd_j[i]; hypre_BigQsort0(aux_offd_j,0,num_nonzeros_offd-1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i=1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i=0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i=0; i < num_rows; i++) { i_col = diag_i[i]; for (j=i_col; j < diag_i[i+1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix, HYPRE_BigInt *row_start, HYPRE_BigInt *row_end, HYPRE_BigInt *col_start, HYPRE_BigInt *col_end ) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id ); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id ); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return(-1); } /* if buffer is not allocated and some information is requested, allocate buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values )) { /* allocate enough space to hold information from the longest row. */ HYPRE_Int max = 1,tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for ( i = 0; i < m; i++ ) { tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row-row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow+1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); nzB = hypre_CSRMatrixI(Ba)[lrow+1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* Sort by increasing column numbers, assuming A and B already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for ( i = 0; i < nzB; i++ ) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for ( i = 0; i < nzA; i++ ) { v_p[imark+i] = vworkA[i]; } for ( i = imark; i < nzB; i++ ) { v_p[nzA+i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for ( i = 0; i < imark; i++ ) { idx_p[i] = cmap[cworkB[i]]; } } else { for ( i = 0; i < nzB; i++ ) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for ( i = 0; i < nzA; i++ ) { idx_p[imark+i] = cstart + cworkA[i]; } for ( i = imark; i < nzB; i++ ) { idx_p[nzA+i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) ); if (exec == 
HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . * *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm, hypre_CSRMatrix *A, HYPRE_BigInt *global_row_starts, HYPRE_BigInt *global_col_starts ) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2*(num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* contains code for what to expect, if 0: global_row_starts = global_col_starts, only global_row_starts given if 1: only global_row_starts given, global_col_starts = NULL if 2: both global_row_starts and global_col_starts given if 3: only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2*(num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i+num_procs+5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = hypre_CSRMatrixJ(A); } 
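/* Illustrative note (example with num_procs = 2): if rank 0 packed
   global_row_starts = {0,2,4} at offsets 4..6 of global_data, the two
   hypre_MPI_Scatter calls below with send_start = 4 and 5 deliver {0,2} to
   rank 0 and {2,4} to rank 1, giving each rank its length-2 row_starts. */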
/* broadcast the 4-entry header, including the layout flag in global_data[3], so every rank takes the same branch below */ hypre_MPI_Bcast(global_data, 4, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if ((global_data[3] == 0) || (global_data[3] == 1)) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); if (global_data[3] == 0) { col_starts = row_starts; } } else { col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } } hypre_TFree(global_data, HYPRE_MEMORY_HOST); // Create ParCSR matrix parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, 0, 0, 0); // Allocate memory for building ParCSR matrix num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { if (!global_row_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts); free_global_row_starts = 1; } if (!global_col_starts) { hypre_GeneratePartitioning(global_num_cols, num_procs, &global_col_starts); free_global_col_starts = 1; } for (i = 0; i < num_procs; i++) { num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i+1] - global_row_starts[i]); num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i+1]] - A_i[(HYPRE_Int)global_row_starts[i]]; } //num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]]; } hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm); hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm); /* RL: this is not correct: (HYPRE_Int) global_num_cols */ local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST); for (i = 1; i < num_procs; i++) { ind = A_i[(HYPRE_Int) global_row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i], &A_data[ind], &A_i[(HYPRE_Int) global_row_starts[i]], &A_j[ind], &csr_matrix_datatypes[i]); hypre_MPI_Isend(hypre_MPI_BOTTOM, 1,
csr_matrix_datatypes[i], i, 0, comm, &requests[i-1]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = A_data; hypre_CSRMatrixI(local_A) = A_i; hypre_CSRMatrixJ(local_A) = A_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs-1, requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST); hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST); if (free_global_row_starts) { hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST); } if (free_global_col_starts) { hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST); } } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows, hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), &csr_matrix_datatypes[0]); hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0); hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix *A, hypre_ParCSRMatrix *matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i=0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } hypre_CSRMatrixNumNonzeros(diag) = jd; 
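/* Worked example (hypothetical values): with first_col_diag = 4 and last_col_diag = 7, a row with global columns {2, 4, 6, 9} contributes {4, 6} to diag (stored locally as {0, 2} after subtracting first_col_diag) and {2, 9} to offd, whose marker entries are compressed below into local indices into col_map_offd. */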
hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i=0; i < num_rows; i++) { for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) { if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i=0; i < num_nonzeros; i++) { diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST); for (i=0; i < num_rows+1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_BigInt *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix); num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows/num_threads; rest = num_rows - size*num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } count = diag_i[ns]+offd_i[ns]; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j=diag_i[i]; j < diag_i[i+1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt)diag_j[j]+first_col_diag; } for (j=offd_i[i]; j < offd_i[i+1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only
works for a ParCSRMatrix whose global sizes fit in HYPRE_Int, i.e. are smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* determine procs that have vector data and store their ids in used_procs */ /* we need a data exchange for this.
If I own rows, I contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send these to the processors that own rows*/ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i]; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests =
hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST); num_requests = 4*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* exchange contents of local_matrix_i - here we are sending to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering?*/ offset = matrix_i[new_vec_starts[1]]; for (i=1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++) matrix_i[j+1] += offset; offset = matrix_i[new_vec_starts[i+1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* generate datatypes for further data exchange and exchange remaining data, i.e. 
column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i=0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies A to B, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Int copy_data ) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(2); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors in the * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int *response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;
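/* This callback runs on the contacted side of hypre_DataExchangeList: it appends contact_proc and the received row-range endpoint(s) to send_proc_obj, growing the id/vec_starts/elements arrays on demand; rank 0 later converts this list into used_procs and new_vec_starts. */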
hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. * A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in an offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B ) { hypre_ParCSRMatrix * C; HYPRE_BigInt * col_map_offd_C = NULL; HYPRE_Int num_procs, my_id; MPI_Comm comm = hypre_ParCSRMatrixComm( A ); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A ); hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A ); hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A ); hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A ); hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B ) == hypre_ParCSRMatrixFirstRowIndex( A ) ); hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A ); hypre_ParCSRMatrixOwnsRowStarts( C ) = 0; hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A ); hypre_ParCSRMatrixOwnsColStarts( C ) = 0; /* A and B must share the same column partitioning; the starts arrays are compared by pointer here */ hypre_assert( hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B) ); hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A ); hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A ); hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A ); hypre_ParCSRMatrixDiag( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0 ); hypre_ParCSRMatrixOffd( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C ); hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C; hypre_ParCSRMatrixCommPkg( C ) = NULL; hypre_ParCSRMatrixCommPkgT( C
) = NULL; hypre_ParCSRMatrixOwnsData( C ) = 1; /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. I suspect, but don't know, that other parts of hypre do not assume that the correct values have been set. hypre_ParCSRMatrixSetNumNonzeros( C ); hypre_ParCSRMatrixSetDNumNonzeros( C );*/ hypre_ParCSRMatrixNumNonzeros( C ) = 0; hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0; hypre_ParCSRMatrixRowindices( C ) = NULL; hypre_ParCSRMatrixRowvalues( C ) = NULL; hypre_ParCSRMatrixGetrowactive( C ) = 0; return C; } /* drop the entries that are not on the diagonal and smaller than * its row norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i+1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v*v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i+1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v*v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i+1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag ++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i+1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* in normal cases: diagonal entry should not * appear in A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd ++; } } } A_diag_i_i = A_diag_i[i+1]; A_offd_i_i = A_offd_i[i+1]; A_diag_i[i+1] = nnz_diag; A_offd_i[i+1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { 
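/* marker_offd is repurposed here: the 0/1 "column kept" flag becomes the old-to-new column index map used to rewrite A_offd_j below */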
col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if ( hypre_ParCSRMatrixCommPkg(A) ) { hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) ); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Perform dual truncation of ParCSR matrix. * This code is adapted from original BoomerAMGInterpTruncate() * A: parCSR matrix to be modified * tol: relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero values after dropping), * this way, the application of the truncated matrix on a constant vector is the same as that of * the original matrix. * nrm_type: type of norm used for dropping with tol. * -- 0 = infinity-norm * -- 1 = 1-norm * -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global=0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. Cum_lost_per_thread * will temporarily store the cumulative number of dropped entries up to * each thread. 
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cum_lost_per_thread; HYPRE_Int * num_lost_per_thread; HYPRE_Int * num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* Compute each thread's range of rows to truncate and compress. Note, * that i, j and data are all compressed as entries are dropped, but * that the compression only occurs locally over each thread's row * range. A_diag_i is only made globally consistent at the end of this * routine. During the dropping phases, A_diag_i[stop] will point to * the start of the next thread's row range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start]; now_checking_offd = A_offd_i[start]; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ?
fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v*v; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v*v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i+1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i+1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) { if (scale != row_sum) { scale = row_sum/scale; for (j = A_diag_i[i]; j < (A_diag_i[i+1]-num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i+1]-num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i=start; i<stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i+1]; last_index_offd = A_offd_i[i+1]; if (i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* Some rows exceed max_row_elmts, and require truncation. Essentially, * each thread truncates and compresses its range of rows locally. 
*/ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* two temporary arrays to hold row i for temporary operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i+1]; last_index_offd = A_offd_i[i+1]; if (i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i]; if (max_row_elmts < num_elmts) { /* copy both diagonal and off-diag parts of row i to _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j]+num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt-cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j,A_aux_data,0,cnt-1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j]-num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag-A_diag_i[i]; num_lost_offd -= cnt_offd-A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum/scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* nothing dropped from this row, but still have to shift entries back * by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index-A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd-A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* Each thread has it's own locally compressed CSR matrix from rows start * to stop. 
Now, we have to copy each thread's chunk into the new * process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_diag_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i=start; i<stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size ; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_offd_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i=start; i<stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size ; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); hypre_CSRMatrixJ(A_offd) = 
A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
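/* Usage sketch for the truncation routine above (illustrative only; the numbers are made up): drop entries smaller than 1e-4 of each row's infinity norm, keep at most 5 entries per row, and rescale the survivors so each row sum is preserved: hypre_ParCSRMatrixTruncate(A, 1e-4, 5, 1, 0); */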
/****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* * In addition to publically accessible interface in HYPRE_mv.h, the * implementation in this file uses accessor macros into the sequential * matrix structure, and so includes the .h that defines that structure. * Should those accessor functions become proper functions at some later * date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *, HYPRE_Int, HYPRE_Int, void *, MPI_Comm, void **, HYPRE_Int *); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* * If create is called and row_starts and col_starts are NOT null, then it is * assumed that they are of length 2 containing the start row of the calling * processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix * hypre_ParCSRMatrixCreate(MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt * row_starts, HYPRE_BigInt * col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_BigInt first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); if (!row_starts) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); } } /* * row_starts[0] is start of local rows. row_starts[1] is start of next * processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1] - first_row_index; first_col_diag = col_starts[0]; local_num_cols = col_starts[1] - first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; //JSP:transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* * We could make these null instead of leaving the range. If that change * is made, then when this create is called from functions like the * matrix-matrix multiply, be careful not to generate a new partition. 
*/ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy(hypre_ParCSRMatrix * matrix) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if (hypre_ParCSRMatrixOwnsData(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if (hypre_ParCSRMatrixDiagT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if (hypre_ParCSRMatrixOffdT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if (hypre_ParCSRMatrixOwnsRowStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixOwnsColStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* * RL: this is actually not correct since the memory_location may * have been changed after allocation put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if (hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix)) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if (hypre_ParCSRMatrixProcOrdering(matrix)) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2(hypre_ParCSRMatrix * matrix, HYPRE_MemoryLocation memory_location) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } 
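/* initialize diag and offd in the requested memory location; the col_map_offd array allocated below always lives in host memory */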
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix * A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A))); /* !!! S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix * hypre_ParCSRMatrixClone(hypre_ParCSRMatrix * A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix * A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if (hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location)) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core(hypre_ParCSRMatrix * matrix, const char *format) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/ hypre_CSRMatrixCheckSetNumNonzeros(diag); hypre_CSRMatrixCheckSetNumNonzeros(offd); if (format[0] == 'I') { HYPRE_BigInt total_num_nonzeros; HYPRE_BigInt local_num_nonzeros; local_num_nonzeros = (HYPRE_BigInt) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd)); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; } else if (format[0] == 'D') { HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; local_num_nonzeros = (HYPRE_Real) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd)); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; } else { hypre_error_in_arg(1); return hypre_error_flag; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_data) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetRowStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetRowStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_row_starts) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_col_starts) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead(MPI_Comm comm, const char *file_name) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_BigInt global_num_rows, global_num_cols; HYPRE_Int num_cols_offd; HYPRE_Int local_num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_BigInt *col_map_offd; FILE *fp; HYPRE_Int equal = 1; HYPRE_BigInt row_s, row_e, col_s, col_e; 
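/* Expected on-disk layout (as written by hypre_ParCSRMatrixPrint): <file_name>.D.<rank> holds the diag CSR block, <file_name>.O.<rank> the offd block (if any), and <file_name>.INFO.<rank> the global sizes, num_cols_offd, the local row/column ranges, and the col_map_offd entries. */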
hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%b", &global_num_rows); hypre_fscanf(fp, "%b", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); /* * the bgl input file should only contain the EXACT range for local * processor */ hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { hypre_fscanf(fp, "%b", &col_map_offd[i]); } fclose(fp); for (i = 1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows, 0, 0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) { hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; } else { hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint(hypre_ParCSRMatrix * matrix, const char *file_name) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); 
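/* Round-trip sketch (illustrative): a matrix written here with hypre_ParCSRMatrixPrint(matrix, "A.out") on num_procs ranks can be read back with hypre_ParCSRMatrixRead(comm, "A.out") on the same number of ranks, since the INFO files record the exact per-rank ranges. */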
hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix), new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix), new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i = 0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ(const hypre_ParCSRMatrix * matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0] + (HYPRE_BigInt) base_i; iupper = row_starts[1] + (HYPRE_BigInt) base_i - 1; jlower = col_starts[0] + (HYPRE_BigInt) base_j; jupper = col_starts[1] + (HYPRE_BigInt) base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt) (i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i + 1]; j++) { J = first_col_diag + (HYPRE_BigInt) (diag_j[j] + base_j); if (diag_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if (num_nonzeros_offd) { for (j = offd_i[i]; j < offd_i[i + 1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt) 
base_j; if (offd_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ(MPI_Comm comm, const char *filename, HYPRE_Int * base_i_ptr, HYPRE_Int * base_j_ptr, hypre_ParCSRMatrix ** matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int) row_starts[0]; base_j = (HYPRE_Int) col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag + (HYPRE_BigInt) num_cols - 1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag + num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int) (I - big_base_i - first_row_index); J -= big_base_j; 
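/*
 * when the entries move to a new row, record the current diag/offd
 * counts as that row's start pointers; columns outside
 * [first_col_diag, last_col_diag] go to the offd block and keep their
 * global column index in tmp_j until col_map_offd is built below
 */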
if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int) (J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_nonzeros_offd; i++) aux_offd_j[i] = (HYPRE_BigInt) offd_j[i]; hypre_BigQsort0(aux_offd_j, 0, num_nonzeros_offd - 1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i = 1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i = 0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i = 0; i < num_rows; i++) { i_col = diag_i[i]; for (j = i_col; j < diag_i[i + 1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange(hypre_ParCSRMatrix * matrix, HYPRE_BigInt * row_start, HYPRE_BigInt * row_end, HYPRE_BigInt * col_start, HYPRE_BigInt * col_end) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(matrix), &my_id); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. 
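 *
 * A minimal usage sketch (A and row_no stand in for the caller's matrix
 * and a globally numbered row owned by this process):
 *
 *    HYPRE_Int      size;
 *    HYPRE_BigInt  *col_ind;
 *    HYPRE_Complex *values;
 *    hypre_ParCSRMatrixGetRow(A, row_no, &size, &col_ind, &values);
 *    ... read the size entries of col_ind/values ...
 *    hypre_ParCSRMatrixRestoreRow(A, row_no, &size, &col_ind, &values);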
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return (-1); } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(mat), &my_id); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return (-1); } /* * if buffer is not allocated and some information is requested, allocate * buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values)) { /* * allocate enough space to hold information from the longest row. */ HYPRE_Int max = 1, tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for (i = 0; i < m; i++) { tmp = hypre_CSRMatrixI(Aa)[i + 1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i + 1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int) (row - row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow + 1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &(hypre_CSRMatrixJ(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); vworkA = &(hypre_CSRMatrixData(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); nzB = hypre_CSRMatrixI(Ba)[lrow + 1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &(hypre_CSRMatrixJ(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); vworkB = &(hypre_CSRMatrixData(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* * Sort by increasing column numbers, assuming A and B * already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for (i = 0; i < nzA; i++) { v_p[imark + i] = vworkA[i]; } for (i = imark; i < nzB; i++) { v_p[nzA + i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for (i = 0; i < imark; i++) { idx_p[i] = cmap[cworkB[i]]; } } else { for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for (i = 0; i < nzA; i++) { idx_p[imark + i] = cstart + cworkA[i]; } for (i = imark; i < nzB; i++) { idx_p[nzA + i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(mat)); if (exec == 
HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow(hypre_ParCSRMatrix * matrix, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . * *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix(MPI_Comm comm, hypre_CSRMatrix * A, HYPRE_BigInt * global_row_starts, HYPRE_BigInt * global_col_starts) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2 * (num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* * contains code for what to expect, if 0: * global_row_starts = global_col_starts, only * global_row_starts given if 1: only global_row_starts * given, global_col_starts = NULL if 2: both * global_row_starts and global_col_starts given if 3: * only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2 * (num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i + num_procs + 5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = 
hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if ((global_data[3] == 0) || (global_data[3] == 1)) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); if (global_data[3] == 0) { col_starts = row_starts; } } else { col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } } hypre_TFree(global_data, HYPRE_MEMORY_HOST); //Create ParCSR matrix parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, 0, 0, 0); //Allocate memory for building ParCSR matrix num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { if (!global_row_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts); free_global_row_starts = 1; } if (!global_col_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts); free_global_col_starts = 1; } for (i = 0; i < num_procs; i++) { num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i + 1] - global_row_starts[i]); num_nonzeros_proc[i] = A_i[(HYPRE_Int) global_row_starts[i + 1]] - A_i[(HYPRE_Int) global_row_starts[i]]; } //num_nonzeros_proc[num_procs - 1] = A_i[(HYPRE_Int) global_num_rows] - A_i[(HYPRE_Int) row_starts[num_procs - 1]]; } hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm); hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm); /* RL: this is not correct: (HYPRE_Int) global_num_cols */ local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_procs - 1, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_procs - 1, HYPRE_MEMORY_HOST); for (i = 1; i < num_procs; i++) { ind = A_i[(HYPRE_Int) global_row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i], &A_data[ind], &A_i[(HYPRE_Int) global_row_starts[i]], &A_j[ind], &csr_matrix_datatypes[i]); 
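/*
 * the derived datatype addresses rank i's row block (values, row
 * pointers, column indices) in place in the root's arrays, so a single
 * nonblocking send per rank transfers the whole block
 */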
hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm, &requests[i - 1]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = A_data; hypre_CSRMatrixI(local_A) = A_i; hypre_CSRMatrixJ(local_A) = A_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs - 1, requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST); hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST); if (free_global_row_starts) { hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST); } if (free_global_col_starts) { hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST); } } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows, hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), &csr_matrix_datatypes[0]); hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0); hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix * A, hypre_ParCSRMatrix * matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /* * RL: XXX FIXME if A spans global column space, the following a_j should * be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows] - first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int) (last_col_diag - first_col_diag + 1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } 
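/*
 * second pass: the row pointers and col_map_offd are final, so size the
 * two blocks, allocate their index/value arrays, and split each entry
 * into diag or offd (marker now maps global to local offd columns)
 */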
hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < (HYPRE_Int) first_col_diag || a_j[j] > (HYPRE_Int) last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int) (a_j[j] - first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i = 0; i < num_nonzeros; i++) { diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows + 1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix * par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_BigInt *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix); num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows / num_threads; rest = num_rows - size * num_threads; for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } count = diag_i[ns] + offd_i[ns];; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j = diag_i[i]; j < diag_i[i + 1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt) diag_j[j] + first_col_diag; } for (j = offd_i[i]; j < offd_i[i + 1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only 
works for a ParCSRMatrix that is smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix * par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf = NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int) (hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* * determine procs that have vector data and store their ids in * used_procs */ /* * we need to do an exchange data for this. 
if I own rows then I will
 * contact processor 0 with the endpoint of my local range
 */
if (local_num_rows > 0)
{
   num_contacts = 1;
   contact_proc_list[0] = 0;
   contact_send_buf[0] = (HYPRE_Int) hypre_ParCSRMatrixLastRowIndex(par_matrix);
   contact_send_buf_starts[0] = 0;
   contact_send_buf_starts[1] = 1;
}
else
{
   num_contacts = 0;
   contact_send_buf_starts[0] = 0;
   contact_send_buf_starts[1] = 0;
}
/* build the response object */
/* send_proc_obj will be for saving info from contacts */
send_proc_obj.length = 0;
send_proc_obj.storage_length = 10;
send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length,
                                 HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1,
                                         HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = 10;
send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt,
                                       send_proc_obj.element_storage_length,
                                       HYPRE_MEMORY_HOST);
max_response_size = 0;   /* each response is null */
response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
response_obj.data1 = NULL;
response_obj.data2 = &send_proc_obj;   /* this is where we keep info from contacts */
hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf,
                       contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int),
                       &response_obj, max_response_size, 1, comm,
                       (void **) &response_recv_buf, &response_recv_buf_starts);
/*
 * now processor 0 should have a list of ranges for processors that have
 * rows - these are in send_proc_obj - it needs to create the new list of
 * processors and also an array of vec starts - and send to those who own
 * rows
 */
if (my_id)
{
   if (local_num_rows)
   {
      /* look for a message from processor 0 */
      hypre_MPI_Probe(0, tag1, comm, &status1);
      hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
      send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
      hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
      /* now unpack */
      num_types = send_info[0];
      used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
      for (i = 1; i <= num_types; i++)
      {
         used_procs[i - 1] = send_info[i];
      }
      for (i = num_types + 1; i < count; i++)
      {
         new_vec_starts[i - num_types - 1] = send_info[i];
      }
   }
   else
   {
      /* clean up and exit */
      hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
      hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
      if (response_recv_buf)
      {
         hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
      }
      if (response_recv_buf_starts)
      {
         hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
      }
      if (hypre_CSRMatrixOwnsData(local_matrix))
      {
         hypre_CSRMatrixDestroy(local_matrix);
      }
      else
      {
         hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
      }
      return NULL;
   }
}
else
{
   /* my_id == 0 */
   num_types = send_proc_obj.length;
   used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
   new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
   new_vec_starts[0] = 0;
   for (i = 0; i < num_types; i++)
   {
      used_procs[i] = send_proc_obj.id[i];
      new_vec_starts[i + 1] = send_proc_obj.elements[i] + 1;
   }
   hypre_qsort0(used_procs, 0, num_types - 1);
   hypre_qsort0(new_vec_starts, 0, num_types);
   /* now we need to put into an array to send */
   count = 2 * num_types + 2;
   send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   send_info[0] = num_types;
   for (i = 1; i <= num_types; i++)
   {
      send_info[i] = (HYPRE_BigInt) used_procs[i - 1];
   }
   for (i = num_types + 1; i <
count; i++) { send_info[i] = new_vec_starts[i - num_types - 1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first */ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i = start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i - start]); } hypre_MPI_Waitall(num_types - start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if (response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if (response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* * everyone left has rows and knows: new_vec_starts, num_types, and * used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); num_requests = 4 * num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* * exchange contents of local_matrix_i - here we are sending to ourself * also */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int) (new_vec_starts[i + 1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i] + 1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering? */ offset = matrix_i[new_vec_starts[1]]; for (i = 1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i + 1]; j++) matrix_i[j + 1] += offset; offset = matrix_i[new_vec_starts[i + 1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* * generate datatypes for further data exchange and exchange remaining * data, i.e. 
column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int) new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int) new_vec_starts[i + 1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies B to A, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B, HYPRE_Int copy_data) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int * response_message_size) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt *) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse *) ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements *) response_obj->data2; 
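/*
 * appends the contact_size entries received from contact_proc to
 * send_proc_obj, growing its id/vec_starts/elements storage on demand;
 * no payload is returned to the contacting processor
 */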
hypre_MPI_Comm_rank(comm, &myid);
/*
 * check to see if we need to allocate more space in send_proc_obj for ids
 */
if (send_proc_obj->length == send_proc_obj->storage_length)
{
   send_proc_obj->storage_length += 10;   /* add space for 10 more processors */
   send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                      send_proc_obj->storage_length,
                                      HYPRE_MEMORY_HOST);
   send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                                              send_proc_obj->storage_length + 1,
                                              HYPRE_MEMORY_HOST);
}
/* initialize */
count = send_proc_obj->length;
index = send_proc_obj->vec_starts[count];   /* this is the number of elements */
/* send proc */
send_proc_obj->id[count] = contact_proc;
/* do we need more storage for the elements? */
if (send_proc_obj->element_storage_length < index + contact_size)
{
   elength = hypre_max(contact_size, 10);
   elength += index;
   send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt,
                                            elength, HYPRE_MEMORY_HOST);
   send_proc_obj->element_storage_length = elength;
}
/* populate send_proc_obj */
for (i = 0; i < contact_size; i++)
{
   send_proc_obj->elements[index++] = recv_contact_buf[i];
}
send_proc_obj->vec_starts[count + 1] = index;
send_proc_obj->length++;
/* output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixUnion
 * Creates and returns a new matrix whose elements are the union of A and B.
 * Data is not copied, only structural information is created.
 * A and B must have the same communicator, numbers and distributions of rows
 * and columns (they can differ in which row-column pairs are nonzero, thus
 * in which columns are in an offd block)
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *
hypre_ParCSRMatrixUnion(hypre_ParCSRMatrix * A,
                        hypre_ParCSRMatrix * B)
{
hypre_ParCSRMatrix *C;
HYPRE_BigInt *col_map_offd_C = NULL;
HYPRE_Int num_procs, my_id;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(C) = hypre_ParCSRMatrixComm(A);
hypre_ParCSRMatrixGlobalNumRows(C) = hypre_ParCSRMatrixGlobalNumRows(A);
hypre_ParCSRMatrixGlobalNumCols(C) = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_ParCSRMatrixFirstRowIndex(C) = hypre_ParCSRMatrixFirstRowIndex(A);
hypre_assert(hypre_ParCSRMatrixFirstRowIndex(B) == hypre_ParCSRMatrixFirstRowIndex(A));
hypre_ParCSRMatrixRowStarts(C) = hypre_ParCSRMatrixRowStarts(A);
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixColStarts(C) = hypre_ParCSRMatrixColStarts(A);
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
/* A and B must share the same column partitioning array */
hypre_assert(hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B));
hypre_ParCSRMatrixFirstColDiag(C) = hypre_ParCSRMatrixFirstColDiag(A);
hypre_ParCSRMatrixLastRowIndex(C) = hypre_ParCSRMatrixLastRowIndex(A);
hypre_ParCSRMatrixLastColDiag(C) = hypre_ParCSRMatrixLastColDiag(A);
hypre_ParCSRMatrixDiag(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixDiag(A),
                                                 hypre_ParCSRMatrixDiag(B), 0, 0, 0);
hypre_ParCSRMatrixOffd(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixOffd(A),
                                                 hypre_ParCSRMatrixOffd(B),
                                                 hypre_ParCSRMatrixColMapOffd(A),
                                                 hypre_ParCSRMatrixColMapOffd(B),
                                                 &col_map_offd_C);
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
hypre_ParCSRMatrixCommPkg(C) = NULL;
hypre_ParCSRMatrixCommPkgT(C) = NULL;
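/*
 * C borrows A's partitioning arrays (Owns*Starts were cleared above);
 * only the merged diag/offd sparsity patterns are freshly allocated and
 * no numerical values are copied
 */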
hypre_ParCSRMatrixOwnsData(C) = 1; /* * SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. * I suspect, but don't know, that other parts of hypre do not assume * that the correct values have been set. * hypre_ParCSRMatrixSetNumNonzeros( C ); * hypre_ParCSRMatrixSetDNumNonzeros( C ); */ hypre_ParCSRMatrixNumNonzeros(C) = 0; hypre_ParCSRMatrixDNumNonzeros(C) = 0.0; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; return C; } /* * drop the entries that are not on the diagonal and smaller than its row * norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* * in normal cases: diagonal entry should not appear in * A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd++; } } } A_diag_i_i = A_diag_i[i + 1]; A_offd_i_i = A_offd_i[i + 1]; A_diag_i[i + 1] = nnz_diag; A_offd_i[i + 1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { 
col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if (hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(A)); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* * Perform dual truncation of ParCSR matrix. This code is adapted from * original BoomerAMGInterpTruncate() A: parCSR matrix to be modified tol: * relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero * values after dropping), this way, the application of the truncated matrix * on a constant vector is the same as that of the original matrix. nrm_type: * type of norm used for dropping with tol. -- 0 = infinity-norm -- 1 = * 1-norm -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global = 0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* * Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. * Cum_lost_per_thread will temporarily store the cumulative number of * dropped entries up to each thread. 
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int *max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int *cum_lost_per_thread; HYPRE_Int *num_lost_per_thread; HYPRE_Int *num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* * Compute each thread's range of rows to truncate and compress. * Note, that i, j and data are all compressed as entries are * dropped, but that the compression only occurs locally over each * thread's row range. A_diag_i is only made globally consistent at * the end of this routine. During the dropping phases, * A_diag_i[stop] will point to the start of the next thread's row * range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v * v; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v * v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i + 1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i + 1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) 
{ if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < (A_diag_i[i + 1] - num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i + 1] - num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* * Some rows exceed max_row_elmts, and require truncation. * Essentially, each thread truncates and compresses its range of * rows locally. */ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* * two temporary arrays to hold row i for temporary * operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this * thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (max_row_elmts < num_elmts) { /* * copy both diagonal and off-diag parts of row i to * _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j] + num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt - cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j, A_aux_data, 0, cnt - 1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j] - num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag - A_diag_i[i]; num_lost_offd -= cnt_offd - A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* * nothing dropped from this row, but still have to * shift entries back by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = 
A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index - A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd - A_offd_i[i]; } } } /* end for (i = start; i < stop; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* * Each thread has its own locally compressed CSR matrix from * rows start to stop. Now, we have to copy each thread's chunk * into the new process-wide CSR data structures. * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } /* * update A_diag_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size,
memory_location_offd); } /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } /* * update A_offd_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); hypre_CSRMatrixJ(A_offd) = A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
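/*
 * Illustrative sketch (not part of the original source) of how the
 * per-thread compression above stitches back together. Suppose two
 * threads, and the dropping passes lose 3 entries in thread 0's rows and
 * 2 in thread 1's rows:
 *
 *    num_lost_per_thread = {3, 2}  ->  cum_lost_per_thread = {3, 5}
 *
 * Thread 1 then copies its surviving (j, data) pairs to
 * next_open = A_diag_i[start] - cum_lost_per_thread[0], i.e. shifted left
 * by the 3 entries thread 0 dropped, and subtracts that same offset from
 * its A_diag_i[start..stop) row pointers, so the final CSR arrays are
 * contiguous across all threads.
 */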
/****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* * In addition to the publicly accessible interface in HYPRE_mv.h, the * implementation in this file uses accessor macros into the sequential * matrix structure, and so includes the .h that defines that structure. * Should those accessor functions become proper functions at some later * date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *, HYPRE_Int, HYPRE_Int, void *, MPI_Comm, void **, HYPRE_Int *); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* * If create is called and row_starts and col_starts are NOT null, then it is * assumed that they are of length 2 containing the start row of the calling * processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix * hypre_ParCSRMatrixCreate(MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt * row_starts, HYPRE_BigInt * col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_BigInt first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); if (!row_starts) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); } } /* * row_starts[0] is start of local rows. row_starts[1] is start of next * processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1] - first_row_index; first_col_diag = col_starts[0]; local_num_cols = col_starts[1] - first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* * We could make these null instead of leaving the range. If that change * is made, then when this create is called from functions like the * matrix-matrix multiply, be careful not to generate a new partition.
*/ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy(hypre_ParCSRMatrix * matrix) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if (hypre_ParCSRMatrixOwnsData(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if (hypre_ParCSRMatrixDiagT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if (hypre_ParCSRMatrixOffdT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if (hypre_ParCSRMatrixOwnsRowStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixOwnsColStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* * RL: this is actually not correct since the memory_location may * have been changed after allocation put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if (hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix)) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if (hypre_ParCSRMatrixProcOrdering(matrix)) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2(hypre_ParCSRMatrix * matrix, HYPRE_MemoryLocation memory_location) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } 
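/*
 * Typical lifecycle (illustrative only; M, N and the nnz counts are
 * placeholder names, not from this file):
 *
 *    hypre_ParCSRMatrix *A = hypre_ParCSRMatrixCreate(comm, M, N,
 *                               row_starts, col_starts,
 *                               num_cols_offd, nnz_diag, nnz_offd);
 *    hypre_ParCSRMatrixInitialize(A);   // allocates diag/offd arrays
 *    ... fill the diag and offd CSR parts ...
 *    hypre_ParCSRMatrixDestroy(A);
 *
 * Create() above only records sizes; the i/j/data arrays and the
 * host-side col_map_offd are allocated by the Initialize step below.
 */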
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix * A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A))); /* !!! S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix * hypre_ParCSRMatrixClone(hypre_ParCSRMatrix * A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix * A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if (hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location)) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core(hypre_ParCSRMatrix * matrix, const char *format) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/ hypre_CSRMatrixCheckSetNumNonzeros(diag); hypre_CSRMatrixCheckSetNumNonzeros(offd); if (format[0] == 'I') { HYPRE_BigInt total_num_nonzeros; HYPRE_BigInt local_num_nonzeros; local_num_nonzeros = (HYPRE_BigInt) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd)); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; } else if (format[0] == 'D') { HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; local_num_nonzeros = (HYPRE_Real) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd)); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; } else { hypre_error_in_arg(1); return hypre_error_flag; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_data) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetRowStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetRowStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_row_starts) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_col_starts) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead(MPI_Comm comm, const char *file_name) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_BigInt global_num_rows, global_num_cols; HYPRE_Int num_cols_offd; HYPRE_Int local_num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_BigInt *col_map_offd; FILE *fp; HYPRE_Int equal = 1; HYPRE_BigInt row_s, row_e, col_s, col_e; 
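/*
 * On-disk layout consumed below (one set of files per rank, matching what
 * hypre_ParCSRMatrixPrint writes): "%s.D.<rank>" and "%s.O.<rank>" hold
 * the diag and offd CSR parts, and "%s.INFO.<rank>" holds, in order:
 * global_num_rows, global_num_cols, num_cols_offd, the exact local ranges
 * "row_s row_e col_s col_e" (end-exclusive), then num_cols_offd global
 * column ids for col_map_offd.
 */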
hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%b", &global_num_rows); hypre_fscanf(fp, "%b", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); /* * the bgl input file should only contain the EXACT range for the local * processor */ hypre_fscanf(fp, "%b %b %b %b", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { hypre_fscanf(fp, "%b", &col_map_offd[i]); } fclose(fp); for (i = 1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows, 0, 0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) { hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; } else { hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint(hypre_ParCSRMatrix * matrix, const char *file_name) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id);
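/*
 * The three per-rank files written below mirror what
 * hypre_ParCSRMatrixRead above expects, so a Print followed by a Read on
 * the same number of ranks round-trips the matrix.
 */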
hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix), new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix), new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i = 0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ(const hypre_ParCSRMatrix * matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0] + (HYPRE_BigInt) base_i; iupper = row_starts[1] + (HYPRE_BigInt) base_i - 1; jlower = col_starts[0] + (HYPRE_BigInt) base_j; jupper = col_starts[1] + (HYPRE_BigInt) base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt) (i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i + 1]; j++) { J = first_col_diag + (HYPRE_BigInt) (diag_j[j] + base_j); if (diag_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if (num_nonzeros_offd) { for (j = offd_i[i]; j < offd_i[i + 1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt) 
base_j; if (offd_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ(MPI_Comm comm, const char *filename, HYPRE_Int * base_i_ptr, HYPRE_Int * base_j_ptr, hypre_ParCSRMatrix ** matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open input file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int) row_starts[0]; base_j = (HYPRE_Int) col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag + (HYPRE_BigInt) num_cols - 1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag + num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int) (I - big_base_i - first_row_index); J -= big_base_j;
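/*
 * At this point I and J have been rebased from the file's numbering to
 * 0-based global indices, and i2 is the local row. Below, J is routed to
 * the diag part if it falls in [first_col_diag, last_col_diag], otherwise
 * to the offd part, keeping the global id in tmp_j until col_map_offd is
 * built.
 */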
if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int) (J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_nonzeros_offd; i++) aux_offd_j[i] = tmp_j[i]; /* tmp_j holds the global column ids read above; offd_j is only filled below */ hypre_BigQsort0(aux_offd_j, 0, num_nonzeros_offd - 1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i = 1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i = 0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element to first position in each row */ for (i = 0; i < num_rows; i++) { i_col = diag_i[i]; for (j = i_col; j < diag_i[i + 1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange(hypre_ParCSRMatrix * matrix, HYPRE_BigInt * row_start, HYPRE_BigInt * row_end, HYPRE_BigInt * col_start, HYPRE_BigInt * col_end) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(matrix), &my_id); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return (-1); } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(mat), &my_id); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return (-1); } /* * if buffer is not allocated and some information is requested, allocate * buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values)) { /* * allocate enough space to hold information from the longest row. */ HYPRE_Int max = 1, tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for (i = 0; i < m; i++) { tmp = hypre_CSRMatrixI(Aa)[i + 1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i + 1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int) (row - row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow + 1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &(hypre_CSRMatrixJ(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); vworkA = &(hypre_CSRMatrixData(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); nzB = hypre_CSRMatrixI(Ba)[lrow + 1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &(hypre_CSRMatrixJ(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); vworkB = &(hypre_CSRMatrixData(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* * Sort by increasing column numbers, assuming A and B * already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for (i = 0; i < nzA; i++) { v_p[imark + i] = vworkA[i]; } for (i = imark; i < nzB; i++) { v_p[nzA + i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for (i = 0; i < imark; i++) { idx_p[i] = cmap[cworkB[i]]; } } else { for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for (i = 0; i < nzA; i++) { idx_p[imark + i] = cstart + cworkA[i]; } for (i = imark; i < nzB; i++) { idx_p[nzA + i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(mat)); if (exec == 
HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow(hypre_ParCSRMatrix * matrix, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . * *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix(MPI_Comm comm, hypre_CSRMatrix * A, HYPRE_BigInt * global_row_starts, HYPRE_BigInt * global_col_starts) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2 * (num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* * contains code for what to expect, if 0: * global_row_starts = global_col_starts, only * global_row_starts given if 1: only global_row_starts * given, global_col_starts = NULL if 2: both * global_row_starts and global_col_starts given if 3: * only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2 * (num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i + num_procs + 5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = 
hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if ((global_data[3] == 0) || (global_data[3] == 1)) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); if (global_data[3] == 0) { col_starts = row_starts; } } else { col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } } hypre_TFree(global_data, HYPRE_MEMORY_HOST); //Create ParCSR matrix parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, 0, 0, 0); //Allocate memory for building ParCSR matrix num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { if (!global_row_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts); free_global_row_starts = 1; } if (!global_col_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts); free_global_col_starts = 1; } for (i = 0; i < num_procs; i++) { num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i + 1] - global_row_starts[i]); num_nonzeros_proc[i] = A_i[(HYPRE_Int) global_row_starts[i + 1]] - A_i[(HYPRE_Int) global_row_starts[i]]; } //num_nonzeros_proc[num_procs - 1] = A_i[(HYPRE_Int) global_num_rows] - A_i[(HYPRE_Int) row_starts[num_procs - 1]]; } hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm); hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm); /* RL: this is not correct: (HYPRE_Int) global_num_cols */ local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_procs - 1, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_procs - 1, HYPRE_MEMORY_HOST); for (i = 1; i < num_procs; i++) { ind = A_i[(HYPRE_Int) global_row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i], &A_data[ind], &A_i[(HYPRE_Int) global_row_starts[i]], &A_j[ind], &csr_matrix_datatypes[i]); 
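/*
 * Note: hypre_BuildCSRMatrixMPIDataType packs this chunk's data/i/j
 * arrays into one derived datatype; presumably it records absolute
 * addresses, which is why the matching send below passes
 * hypre_MPI_BOTTOM as the buffer (the standard MPI absolute-address
 * idiom). This is an editorial reading of the call, not stated in the
 * original source.
 */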
hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm, &requests[i - 1]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = A_data; hypre_CSRMatrixI(local_A) = A_i; hypre_CSRMatrixJ(local_A) = A_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs - 1, requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST); hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST); if (free_global_row_starts) { hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST); } if (free_global_col_starts) { hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST); } } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows, hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), &csr_matrix_datatypes[0]); hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0); hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix * A, hypre_ParCSRMatrix * matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /* * RL: XXX FIXME if A spans global column space, the following a_j should * be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows] - first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int) (last_col_diag - first_col_diag + 1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } 
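/*
 * Second pass below. After the counting pass, marker[] doubles as the
 * map from a global off-diagonal column to its compressed index: e.g. if
 * only columns 7 and 9 fall outside [first_col_diag, last_col_diag],
 * then col_map_offd = {7, 9}, marker[7] = 0, marker[9] = 1, and each
 * offd entry stores marker[a_j[j]] as its local column index.
 */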
hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < (HYPRE_Int) first_col_diag || a_j[j] > (HYPRE_Int) last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int) (a_j[j] - first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i = 0; i < num_nonzeros; i++) { diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows + 1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix * par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_BigInt *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix); num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows / num_threads; rest = num_rows - size * num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } count = diag_i[ns] + offd_i[ns]; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j = diag_i[i]; j < diag_i[i + 1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt) diag_j[j] + first_col_diag; } for (j = offd_i[i]; j < offd_i[i + 1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a
CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix * par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf = NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int) (hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* * determine procs that have vector data and store their ids in * used_procs */ /* * we need to do an exchange data for this. 
If I own rows, then I will * contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = (HYPRE_Int) hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /* build the response object */ /* send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /* this is where we keep info * from contacts */ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void **)&response_recv_buf, &response_recv_buf_starts); /* * now processor 0 should have a list of ranges for processors that have * rows - these are in send_proc_obj - it needs to create the new list of * processors and also an array of vec starts - and send to those who own * row */ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST); for (i = 1; i <= num_types; i++) { used_procs[i - 1] = send_info[i]; } for (i = num_types + 1; i < count; i++) { new_vec_starts[i - num_types - 1] = send_info[i]; } } else { /* clean up and exit */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if (response_recv_buf) { hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); } if (response_recv_buf_starts) { hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); } if (hypre_CSRMatrixOwnsData(local_matrix)) { hypre_CSRMatrixDestroy(local_matrix); } else { hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); } return NULL; } } else { /* my_id == 0 */ num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i = 0; i < num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i + 1] = send_proc_obj.elements[i] + 1; } hypre_qsort0(used_procs, 0, num_types - 1); hypre_qsort0(new_vec_starts, 0, num_types); /* now we need to put into an array to send */ count = 2 * num_types + 2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i = 1; i <= num_types; i++) { send_info[i] = (HYPRE_BigInt) used_procs[i - 1]; } for (i = num_types + 1; i <
count; i++) { send_info[i] = new_vec_starts[i - num_types - 1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first */ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i = start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i - start]); } hypre_MPI_Waitall(num_types - start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if (response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if (response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* * everyone left has rows and knows: new_vec_starts, num_types, and * used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); num_requests = 4 * num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* * exchange contents of local_matrix_i - here we are sending to ourself * also */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int) (new_vec_starts[i + 1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i] + 1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering? */ offset = matrix_i[new_vec_starts[1]]; for (i = 1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i + 1]; j++) matrix_i[j + 1] += offset; offset = matrix_i[new_vec_starts[i + 1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* * generate datatypes for further data exchange and exchange remaining * data, i.e. 
column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int) new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int) new_vec_starts[i + 1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies A to B, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B, HYPRE_Int copy_data) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(2); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int * response_message_size) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt *) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse *) ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements *) response_obj->data2;
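/* bookkeeping used below: each contact appends to send_proc_obj - the contacting rank goes into id[], the received values into elements[], and vec_starts[] records where each contact's entries begin and end, CSR-style; all three arrays grow on demand */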
hypre_MPI_Comm_rank(comm, &myid); /* * check to see if we need to allocate more space in send_proc_obj for * ids */ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length += 10; /* add space for 10 more * processors */ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /* initialize */ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /* this is the number of * elements */ /* send proc */ send_proc_obj->id[count] = contact_proc; /* do we need more storage for the elements? */ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /* populate send_proc_obj */ for (i = 0; i < contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count + 1] = index; send_proc_obj->length++; /* output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. * A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in a offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B) { hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int num_procs, my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrixGlobalNumRows(C) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixGlobalNumCols(C) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixFirstRowIndex(C) = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(hypre_ParCSRMatrixFirstRowIndex(B) == hypre_ParCSRMatrixFirstRowIndex(A)); hypre_ParCSRMatrixRowStarts(C) = hypre_ParCSRMatrixRowStarts(A); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixColStarts(C) = hypre_ParCSRMatrixColStarts(A); hypre_ParCSRMatrixOwnsColStarts(C) = 0; for (p = 0; p <= num_procs; ++p) hypre_assert(hypre_ParCSRMatrixColStarts(A)[p] == hypre_ParCSRMatrixColStarts(B)[p]); hypre_ParCSRMatrixFirstColDiag(C) = hypre_ParCSRMatrixFirstColDiag(A); hypre_ParCSRMatrixLastRowIndex(C) = hypre_ParCSRMatrixLastRowIndex(A); hypre_ParCSRMatrixLastColDiag(C) = hypre_ParCSRMatrixLastColDiag(A); hypre_ParCSRMatrixDiag(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0); hypre_ParCSRMatrixOffd(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C); hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL;
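/* the comm pkgs are deliberately left NULL: hypre typically builds them lazily, e.g. via hypre_MatvecCommPkgCreate(), the first time a matvec or similar operation needs one */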
hypre_ParCSRMatrixOwnsData(C) = 1; /* * SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. * I suspect, but don't know, that other parts of hypre do not assume * that the correct values have been set. * hypre_ParCSRMatrixSetNumNonzeros( C ); * hypre_ParCSRMatrixSetDNumNonzeros( C ); */ hypre_ParCSRMatrixNumNonzeros(C) = 0; hypre_ParCSRMatrixDNumNonzeros(C) = 0.0; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; return C; } /* * drop the entries that are not on the diagonal and smaller than its row * norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* * in normal cases: diagonal entry should not appear in * A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd++; } } } A_diag_i_i = A_diag_i[i + 1]; A_offd_i_i = A_offd_i[i + 1]; A_diag_i[i + 1] = nnz_diag; A_offd_i[i + 1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { 
col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if (hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(A)); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* * Perform dual truncation of ParCSR matrix. This code is adapted from * original BoomerAMGInterpTruncate() A: parCSR matrix to be modified tol: * relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero * values after dropping), this way, the application of the truncated matrix * on a constant vector is the same as that of the original matrix. nrm_type: * type of norm used for dropping with tol. -- 0 = infinity-norm -- 1 = * 1-norm -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global = 0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* * Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. * Cum_lost_per_thread will temporarily store the cumulative number of * dropped entries up to each thread. 
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int *max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int *cum_lost_per_thread; HYPRE_Int *num_lost_per_thread; HYPRE_Int *num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* * Compute each thread's range of rows to truncate and compress. * Note, that i, j and data are all compressed as entries are * dropped, but that the compression only occurs locally over each * thread's row range. A_diag_i is only made globally consistent at * the end of this routine. During the dropping phases, * A_diag_i[stop] will point to the start of the next thread's row * range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? 
fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v * v; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v * v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i + 1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i + 1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < (A_diag_i[i + 1] - num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i + 1] - num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* * Some rows exceed max_row_elmts, and require truncation. * Essentially, each thread truncates and compresses its range of * rows locally. 
*/ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* * two temporary arrays to hold row i for temporary * operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this * thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (max_row_elmts < num_elmts) { /* * copy both diagonal and off-diag parts of row i to * _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j] + num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt - cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j, A_aux_data, 0, cnt - 1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j] - num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag - A_diag_i[i]; num_lost_offd -= cnt_offd - A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* * nothing dropped from this row, but still have to * shift entries back by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index - A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd - A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* * Each thread has it's own locally compressed CSR matrix from * rows start to stop. 
Now, we have to copy each thread's chunk * into the new process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * update A_diag_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * update A_offd_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); 
hypre_CSRMatrixJ(A_offd) = A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
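/*--------------------------------------------------------------------------
 * Illustrative sketch (not part of hypre): the truncation above relies on a
 * two-phase, thread-parallel compaction. Each thread first compresses its
 * own row range in place, counting how many entries it dropped; a prefix
 * sum of those counts (the role of cum_lost_per_thread) then tells each
 * thread how far to shift its surviving entries when the globally
 * compressed arrays are assembled. The serial sketch below shows the same
 * pattern on a plain array; sketch_compact_chunks, chunk_start and num_lost
 * are hypothetical names, and each "chunk" stands in for one thread's row
 * range.
 *--------------------------------------------------------------------------*/
#include <math.h> /* fabs; harmless if already pulled in by hypre headers */

static void sketch_compact_chunks(double *data, HYPRE_Int nchunks,
                                  const HYPRE_Int *chunk_start, /* length nchunks + 1 */
                                  HYPRE_Int *num_lost,          /* out: drops per chunk */
                                  double tol)
{
   HYPRE_Int k, i, next_open, shift;

   /* phase 1: each chunk packs its surviving entries (|v| >= tol) to the
      front of its own range and records how many entries it dropped */
   for (k = 0; k < nchunks; k++)
   {
      next_open = chunk_start[k];
      num_lost[k] = 0;
      for (i = chunk_start[k]; i < chunk_start[k + 1]; i++)
      {
         if (fabs(data[i]) >= tol)
         {
            data[next_open++] = data[i];
         }
         else
         {
            num_lost[k]++;
         }
      }
   }

   /* phase 2: running prefix sum of the drop counts; chunk k shifts left by
      the total dropped in chunks 0..k-1, so the kept entries end up densely
      packed at the front of the array */
   shift = 0;
   for (k = 0; k < nchunks; k++)
   {
      for (i = chunk_start[k]; i < chunk_start[k + 1] - num_lost[k]; i++)
      {
         data[i - shift] = data[i];
      }
      shift += num_lost[k];
   }
}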
par_csr_matop.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /*-------------------------------------------------------------------------- * hypre_ParMatmul_RowSizes: * * Computes sizes of C rows. Formerly part of hypre_ParMatmul but removed * so it can also be used for multiplication of Boolean matrices. * * Arrays computed: C_diag_i, C_offd_i. * * Arrays needed: (17, all HYPRE_Int*) * rownnz_A, * A_diag_i, A_diag_j, * A_offd_i, A_offd_j, * B_diag_i, B_diag_j, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j, * col_map_offd_B, col_map_offd_B, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j. * * Scalars computed: C_diag_size, C_offd_size. * * Scalars needed: * num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, * first_col_diag_B, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C *--------------------------------------------------------------------------*/ void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location, HYPRE_Int **C_diag_i, HYPRE_Int **C_offd_i, HYPRE_Int *rownnz_A, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Int *A_offd_i, HYPRE_Int *A_offd_j, HYPRE_Int *B_diag_i, HYPRE_Int *B_diag_j, HYPRE_Int *B_offd_i, HYPRE_Int *B_offd_j, HYPRE_Int *B_ext_diag_i, HYPRE_Int *B_ext_diag_j, HYPRE_Int *B_ext_offd_i, HYPRE_Int *B_ext_offd_j, HYPRE_Int *map_B_to_C, HYPRE_Int *C_diag_size, HYPRE_Int *C_offd_size, HYPRE_Int num_rownnz_A, HYPRE_Int num_rows_diag_A, HYPRE_Int num_cols_offd_A, HYPRE_Int allsquare, HYPRE_Int num_cols_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_Int num_cols_offd_C ) { HYPRE_Int *jj_count_diag_array; HYPRE_Int *jj_count_offd_array; HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */ HYPRE_Int num_threads = hypre_NumThreads(); *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location); *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location); jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of A *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int size, rest, num_threads; HYPRE_Int ii, ns, ne; num_threads = hypre_NumActiveThreads(); size = num_rownnz_A/num_threads; rest = num_rownnz_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { 
ii1 = i1; /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++) { i2 = A_offd_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++) { i2 = A_diag_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of B_offd. *-----------------------------------------------------------*/ if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } } } /*-------------------------------------------------------------------- * Set C_diag_i and C_offd_i for this row. 
*--------------------------------------------------------------------*/ (*C_diag_i)[ii1] = jj_row_begin_diag; (*C_offd_i)[ii1] = jj_row_begin_offd; } jj_count_diag_array[ii] = jj_count_diag; jj_count_offd_array[ii] = jj_count_offd; hypre_TFree(B_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct diag_i and offd_i - phase 1 */ if (ii) { jj_count_diag = jj_count_diag_array[0]; jj_count_offd = jj_count_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { jj_count_diag += jj_count_diag_array[i1]; jj_count_offd += jj_count_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { ii1 = rownnz_A ? rownnz_A[i1] : i1; (*C_diag_i)[ii1] += jj_count_diag; (*C_offd_i)[ii1] += jj_count_offd; } } else { (*C_diag_i)[num_rows_diag_A] = 0; (*C_offd_i)[num_rows_diag_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1]; (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1]; } } /* Correct diag_i and offd_i - phase 2 */ if (rownnz_A != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i1 = ns; i1 < (ne-1); i1++) { for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1+1]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1+1]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1+1]]; } } if (ii < (num_threads - 1)) { for (ii1 = rownnz_A[ne-1] + 1; ii1 < rownnz_A[ne]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]]; } } else { for (ii1 = rownnz_A[ne-1] + 1; ii1 < num_rows_diag_A; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A]; (*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A]; } } } } /* end parallel loop */ *C_diag_size = (*C_diag_i)[num_rows_diag_A]; *C_offd_size = (*C_offd_i)[num_rows_diag_A]; #ifdef HYPRE_DEBUG HYPRE_Int i; for (i = 0; i < num_rows_diag_A; i++) { hypre_assert((*C_diag_i)[i] <= (*C_diag_i)[i+1]); hypre_assert((*C_offd_i)[i] <= (*C_offd_i)[i+1]); } #endif hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST); /* End of First Pass */ } /*-------------------------------------------------------------------------- * hypre_ParMatmul: * * Multiplies two ParCSRMatrices A and B and returns the product in * ParCSRMatrix C. * * Note: C does not own the partitionings since its row_starts * is owned by A and col_starts by B. 
*--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime(); #endif /* ParCSRMatrix A */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_rownnz_A; HYPRE_Int *rownnz_A = NULL; /* ParCSRMatrix B */ HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_BigInt last_col_diag_B; /* A_diag */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* A_offd */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); /* B_diag */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); /* B_offd */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); /* ParCSRMatrix C */ hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *map_B_to_C = NULL; /* C_diag */ hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; /* C_offd */ hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Int C_diag_size; /* Bs_ext */ hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_BigInt *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_BigInt *B_big_offd_j = NULL; HYPRE_Int B_ext_offd_size; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO 
cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } /* if C=A*B is square globally and locally, then C_diag should be square also */ if ( num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B ) { allsquare = 1; } /* Set rownnz of A */ if (num_rownnz_diag_A != num_rows_diag_A && num_rownnz_offd_A != num_rows_offd_A ) { hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir, num_rownnz_offd_A, A_offd_ir, &num_rownnz_A, &rownnz_A); } else { num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A); } /*----------------------------------------------------------------------- * Extract B_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i = ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { 
B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads()); } #pragma omp barrier cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]); B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&set); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) { //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]); } } if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_BigInt *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i = ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < 
Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size || num_cols_offd_B) { temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { HYPRE_BigInt value; hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = ns; i < ne; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) { B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j], //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C); } } } /* end parallel region */ hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif 
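/* Two passes form the product: hypre_ParMatmul_RowSizes below is the symbolic pass - it only counts the nonzeros of each row of C, using B_marker as a sparse accumulator (B_marker[i3] holds the C-position where column i3 was last touched; a value older than the current row's start means the column is new to this row and gets counted). The numeric pass further down repeats the same traversal, this time writing C's j and data arrays. */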
hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i, rownnz_A, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, size, rest, ii; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int num_threads; HYPRE_Complex a_entry; /*, a_b_product;*/ num_threads = hypre_NumActiveThreads(); size = num_rownnz_A/num_threads; rest = num_rownnz_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = C_diag_i[rownnz_A ? rownnz_A[ns] : ns]; jj_count_offd = C_offd_i[rownnz_A ? rownnz_A[ns] : ns]; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { ii1 = i1; /*-------------------------------------------------------------------- * Create diagonal entry, C_{i1,i1} *--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; C_diag_data[jj_count_diag] = zero; C_diag_j[jj_count_diag] = i1; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++) { i2 = A_offd_j[jj2]; a_entry = A_offd_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. 
*-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3]; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3]; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++) { i2 = A_diag_j[jj2]; a_entry = A_diag_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3]; } } if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. 
*--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3]; } } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size); /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C, 0); hypre_ParCSRMatrixSetColStartsOwner(C, 0); C_diag = hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_data; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; hypre_CSRMatrixSetRownnz(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_CSRMatrixData(C_offd) = C_offd_data; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; } hypre_CSRMatrixSetRownnz(C_offd); hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C; hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST); if (B_ext_diag_size) { hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST); } hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST); if (B_ext_offd_size) { hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime(); #endif return C; } /* The following function was formerly part of hypre_ParCSRMatrixExtractBExt but the code was removed so it can be used for a corresponding function for Boolean matrices JSP: to allow communication overlapping, it returns comm_handle_idx and comm_handle_data. Before accessing B, they should be destroyed (including send_data contained in the comm_handle). 
*/

void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data,
   hypre_ParCSRCommHandle **comm_handle_idx,
   hypre_ParCSRCommHandle **comm_handle_data,
   HYPRE_Int *CF_marker,
   HYPRE_Int *CF_marker_offd,
   HYPRE_Int skip_fine,      /* 1 if only coarse points are needed */
   HYPRE_Int skip_same_sign  /* 1 if only points that have the same sign are needed */
   /* extended-based long-range interpolation: skip_fine = 1, skip_same_sign = 0
      for the S matrix; skip_fine = 1, skip_same_sign = 1 for the A matrix;
      other interpolation: skip_fine = 0, skip_same_sign = 0 */
)
{
   hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   HYPRE_Int *B_int_i;
   HYPRE_BigInt *B_int_j;
   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_Complex *B_int_data;
   HYPRE_BigInt *B_int_row_map;
   HYPRE_BigInt *B_ext_row_map;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i, j, k;
   HYPRE_Int start_index;
   /*HYPRE_Int jrow;*/
   HYPRE_Int num_rows_B_ext;
   HYPRE_Int *prefix_sum_workspace;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   HYPRE_BigInt first_row_index = row_starts[0];

   num_rows_B_ext = recv_vec_starts[num_recvs];
   if (num_rows_B_ext < 0)
   {
      /* no B_ext, no communication */
      *pB_ext_i = NULL;
      *pB_ext_j = NULL;
      if (data) { *pB_ext_data = NULL; }
      if (find_row_map) { *pB_ext_row_map = NULL; }
      *num_nonzeros = 0;
      return;
   }
   B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
   B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
   *pB_ext_i = B_ext_i;
   if (find_row_map)
   {
      B_int_row_map = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
      B_ext_row_map = hypre_CTAlloc(HYPRE_BigInt, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
      *pB_ext_row_map = B_ext_row_map;
   }

   /*--------------------------------------------------------------------------
    * generate B_int_i through adding number of row-elements of offd and diag
    * for corresponding rows.
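    * (Per send row j this is
    *     B_int_i[j+1] = (# kept diag entries) + (# kept offd entries),
    * where "kept" means: all entries; only coarse entries when
    * skip_fine is set; and, when skip_same_sign is also set, only
    * coarse entries whose sign is opposite to that of the row's
    * diagonal entry.)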
B_int_i[j+1] contains the number of elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); jdata_send_map_starts[0] = B_int_i[0] = 0; /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,k) #endif { /*HYPRE_Int counts[num_sends];*/ HYPRE_Int *counts; counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST); for (i=0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = 0; if (skip_fine && skip_same_sign) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (offd_data[k] < 0) len++; } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (offd_data[k] > 0) len++; } } B_int_i[j + 1] = len; count += len; } } else if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) len++; } B_int_i[j + 1] = len; count += len; } } else { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow]; len += offd_i[jrow + 1] - offd_i[jrow]; B_int_i[j + 1] = len; count += len; } } if (find_row_map) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index; } } counts[i] = count; } hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace); #ifdef HYPRE_USING_OPENMP #pragma omp master #endif { for (i = 1; i < num_sends; i++) { jdata_send_map_starts[i + 1] += jdata_send_map_starts[i]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg, &B_int_i[1],&(B_ext_i[1]) ); if ( find_row_map ) { /* scatter/gather B_int row numbers to form array of B_ext row numbers */ row_map_comm_handle = hypre_ParCSRCommHandleCreate (21,comm_pkg, B_int_row_map, B_ext_row_map ); } B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = 0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = counts[i] + jdata_send_map_starts[i]; if 
(data) { if (skip_same_sign && skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; /*HYPRE_Int count_begin = count;*/ if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_BigInt c_global = col_map_offd[c]; if (offd_data[k] < 0) { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_BigInt c_global = col_map_offd[c]; if (offd_data[k] > 0) { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; B_int_data[count] = offd_data[k]; count++; } } } } // data else { if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; count++; } for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } // !data } /* for each send target */ hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* omp parallel. JSP: this takes most of time in this function */ hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange B_ext_i[j+1] contains the number of elements * of a row j ! 
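 * (for example, received row sizes {2, 0, 3} become offsets
 *  {0, 2, 2, 5} after the running sum below, turning B_ext_i into a
 *  proper CSR row pointer with B_ext_i[num_rows_B_ext] equal to the
 *  total nonzero count)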
 * evaluate B_ext_i and compute *num_nonzeros for B_ext
 *--------------------------------------------------------------------------*/

   for (i = 0; i < num_recvs; i++)
   {
      for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
      {
         B_ext_i[j+1] += B_ext_i[j];
      }
   }

   *num_nonzeros = B_ext_i[num_rows_B_ext];

   *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_j = *pB_ext_j;
   if (data)
   {
      *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
      B_ext_data = *pB_ext_data;
   }

   for (i = 0; i < num_recvs; i++)
   {
      start_index = B_ext_i[recv_vec_starts[i]];
      *num_nonzeros = B_ext_i[recv_vec_starts[i+1]] - start_index;
      jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
   }

   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   *comm_handle_idx = hypre_ParCSRCommHandleCreate(21, tmp_comm_pkg, B_int_j, B_ext_j);
   if (data)
   {
      *comm_handle_data = hypre_ParCSRCommHandleCreate(1, tmp_comm_pkg, B_int_data,
                                                       B_ext_data);
   }

   if (row_map_comm_handle)
   {
      hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
      row_map_comm_handle = NULL;
   }

   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
   if (find_row_map)
   {
      hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);
   }
   /* end generic part */
}

void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data )
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      &comm_handle_idx, &comm_handle_data,
      NULL, NULL,
      0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and needed for multiplication with A locally. The rows
 * are returned as CSRMatrix.
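 * Column indices of the returned matrix are global ("big") column
 * numbers stored in its big-J array; splitting them back into local
 * diag/offd indices is left to the caller.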
*--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, HYPRE_Int skip_same_sign ) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B); /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/ HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_recvs; HYPRE_Int *recv_vec_starts; HYPRE_Int num_sends; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Real *diag_data = hypre_CSRMatrixData(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Real *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int num_cols_B, num_nonzeros; HYPRE_Int num_rows_B_ext; hypre_CSRMatrix *B_ext; HYPRE_Int *B_ext_i; HYPRE_BigInt *B_ext_j; HYPRE_Complex *B_ext_data; HYPRE_BigInt *idummy; /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } comm_pkg = hypre_ParCSRMatrixCommPkg(A); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); num_rows_B_ext = recv_vec_starts[num_recvs]; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy, &num_nonzeros, data, 0, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, B->row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, comm_handle_idx, comm_handle_data, CF_marker, CF_marker_offd, skip_fine, skip_same_sign ); B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros); hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_ext) = B_ext_i; hypre_CSRMatrixBigJ(B_ext) = B_ext_j; if (data) hypre_CSRMatrixData(B_ext) = B_ext_data; return B_ext; } hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { #if 0 hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx, HYPRE_MEMORY_HOST); if (want_data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data, HYPRE_MEMORY_HOST); } #else hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); hypre_CSRMatrix *B_ext; void *request; if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ParcsrGetExternalRowsInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, &request); B_ext = hypre_ParcsrGetExternalRowsWait(request); #endif return B_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_ParCSRCommHandle *comm_handle; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A); HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, num_recvs, num_cols_offd_AT; HYPRE_Int i, j, k, index, counter, j_row; HYPRE_BigInt value; hypre_ParCSRMatrix *AT; hypre_CSRMatrix *AT_diag; hypre_CSRMatrix *AT_offd; hypre_CSRMatrix *AT_tmp; HYPRE_BigInt first_row_index_AT, first_col_diag_AT; HYPRE_Int local_num_rows_AT, local_num_cols_AT; HYPRE_Int *AT_tmp_i; HYPRE_Int *AT_tmp_j; HYPRE_BigInt *AT_big_j = NULL; HYPRE_Complex *AT_tmp_data; HYPRE_Int *AT_buf_i; HYPRE_BigInt *AT_buf_j; HYPRE_Complex *AT_buf_data; HYPRE_Int *AT_offd_i; HYPRE_Int *AT_offd_j; HYPRE_Complex *AT_offd_data; HYPRE_BigInt *col_map_offd_AT; HYPRE_BigInt *row_starts_AT; HYPRE_BigInt *col_starts_AT; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *recv_vec_starts; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int *tmp_recv_vec_starts; HYPRE_Int *tmp_send_map_starts; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_cols_offd_AT = 0; counter = 0; AT_offd_j = NULL; AT_offd_data = NULL; col_map_offd_AT = NULL; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data); AT_tmp_i = hypre_CSRMatrixI(AT_tmp); AT_tmp_j = hypre_CSRMatrixJ(AT_tmp); if (data) { AT_tmp_data = hypre_CSRMatrixData(AT_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (AT_tmp_i[num_cols_offd]) { AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST); } 
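      /* The exchange below runs in the reverse direction of the usual
         matvec pattern: rows of AT_tmp correspond to columns of A_offd,
         i.e. to rows of A^T owned by other ranks. The transposed column
         indices are first shifted to global row numbers, the row pointer
         is converted to per-row counts, and those counts are returned to
         their owners with a type-12 (reversed HYPRE_Int) communication
         into AT_buf_i. */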
for (i = 0; i < AT_tmp_i[num_cols_offd]; i++) { //AT_tmp_j[i] += first_row_index; AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index; } for (i = 0; i < num_cols_offd; i++) { AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i]; } comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i); } hypre_CSRMatrixTranspose(A_diag, &AT_diag, data); AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = send_map_starts[0]; for (i = 0; i < num_sends; i++) { tmp_send_map_starts[i+1] = tmp_send_map_starts[i]; for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { tmp_send_map_starts[i+1] += AT_buf_i[j]; AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j]; } } for (i = 0; i < num_cols; i++) { AT_offd_i[i+1] += AT_offd_i[i]; } tmp_recv_vec_starts[0] = recv_vec_starts[0]; for (i = 0; i < num_recvs; i++) { tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i]; for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { tmp_recv_vec_starts[i+1] += AT_tmp_i[j]; } } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); if (data) { AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(AT_tmp); if (AT_offd_i[num_cols]) { AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location); AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST); if (data) { AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location); } } else { AT_offd_j = NULL; AT_offd_data = NULL; } counter = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { j_row = send_map_elmts[j]; index = AT_offd_i[j_row]; for (k = 0; k < AT_buf_i[j]; k++) { if (data) { AT_offd_data[index] = AT_buf_data[counter]; } AT_big_j[index++] = AT_buf_j[counter++]; } AT_offd_i[j_row] = index; } } for (i = num_cols; i > 0; i--) { AT_offd_i[i] = AT_offd_i[i-1]; } AT_offd_i[0] = 0; if (counter) { hypre_BigQsort0(AT_buf_j,0,counter-1); num_cols_offd_AT = 1; value = AT_buf_j[0]; for (i = 1; i < counter; i++) { if (value < AT_buf_j[i]) { AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i]; value = AT_buf_j[i]; } } } if (num_cols_offd_AT) { col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST); } else { 
col_map_offd_AT = NULL; } for (i = 0; i < num_cols_offd_AT; i++) { col_map_offd_AT[i] = AT_buf_j[i]; } hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST); hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST); if (data) { hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < counter; i++) { AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT); } hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); } AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter); hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location; hypre_CSRMatrixI(AT_offd) = AT_offd_i; hypre_CSRMatrixJ(AT_offd) = AT_offd_j; hypre_CSRMatrixData(AT_offd) = AT_offd_data; row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { row_starts_AT[i] = col_starts[i]; } if (row_starts != col_starts) { col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { col_starts_AT[i] = row_starts[i]; } } else { col_starts_AT = row_starts_AT; } first_row_index_AT = row_starts_AT[0]; first_col_diag_AT = col_starts_AT[0]; local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT ); local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT); AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(AT) = comm; hypre_ParCSRMatrixDiag(AT) = AT_diag; hypre_ParCSRMatrixOffd(AT) = AT_offd; hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT; hypre_ParCSRMatrixColStarts(AT) = col_starts_AT; hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT; hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT; hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1; hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1; hypre_ParCSRMatrixOwnsData(AT) = 1; hypre_ParCSRMatrixOwnsRowStarts(AT) = 1; hypre_ParCSRMatrixOwnsColStarts(AT) = 1; if (row_starts_AT == col_starts_AT) { hypre_ParCSRMatrixOwnsColStarts(AT) = 0; } hypre_ParCSRMatrixCommPkg(AT) = NULL; hypre_ParCSRMatrixCommPkgT(AT) = NULL; hypre_ParCSRMatrixRowindices(AT) = NULL; hypre_ParCSRMatrixRowvalues(AT) = NULL; hypre_ParCSRMatrixGetrowactive(AT) = 0; hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1; *AT_ptr = AT; return ierr; } /* ----------------------------------------------------------------------------- * generate a parallel spanning tree (for Maxwell Equation) * G_csr is the node to edge connectivity matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_BigInt nrows_G, ncols_G; HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = 
hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) counts[i] = 0; for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++; G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST); G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; } hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1; for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i; else GT_diag_mat[edge*2+1] = i; } } /* BFS on the local matrix graph to find tree */ nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0; for (i = 0; i < ncols_G; i++) edges_marked[i] = 0; queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail-queue_head) > 0) { node = queue[queue_tail-1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2*edge+1] != -1) { node2 = GT_diag_mat[2*edge]; if (node2 == node) node2 = GT_diag_mat[2*edge+1]; if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends+nrecvs) > 0) { n_proc_array = 0; proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST); for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i]; for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i]; hypre_qsort0(proc_array, 0, nsends+nrecvs-1); n_proc_array = 1; for (i = 1; i < nrecvs+nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) proc_array[n_proc_array++] = proc_array[i]; } pgraph_i = hypre_TAlloc(HYPRE_Int, 
(nprocs+1) , HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. 
But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) { hypre_TFree(children, HYPRE_MEMORY_HOST); } /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; hypre_TFree(edges_marked, HYPRE_MEMORY_HOST); if (G_type != 0) { hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) { proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; } /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) 
exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; /* This case is not yet implemented! */ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; 
hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* 
----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ 
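   /* Recall the encoding set up above: for nrows_A = 5 and indices = {1, 3},
      exp_indices = {-1, 0, -2, 1, -3}. A nonnegative value is the new local
      row/column number inside the extracted block, while a negative value v
      encodes position (-v - 1) inside the complement block. */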
   nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) nnz11++;
         }
         nnz11_offd += A_offd_i[i+1] - A_offd_i[i];
      }
      else
      {
         /* A21 keeps every column of the complement rows (the fill loop
            below copies whole rows), so count the full diag row here */
         nnz21 += A_diag_i[i+1] - A_diag_i[i];
         nnz21_offd += A_offd_i[i+1] - A_offd_i[i];
      }
   }

   /* -----------------------------------------------------
    * create A11 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr));
   nnz_diag = nnz11;
   nnz_offd = nnz11_offd;

   global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
   itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
   global_ncols = itmp_array[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
      col_starts[i] = itmp_array[i];
   }
   A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A11_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   /* off-diagonal values must come from A's offd data array */
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            offd_a[nnz++] = A_offd_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A11_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * create A21 matrix
    * ----------------------------------------------------- */
   ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr));
   nnz_offd = nnz21_offd;
   nnz_diag = nnz21;
   global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
   itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
   global_ncols = itmp_array[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
      col_starts[i] = itmp_array[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         /* copy the entire diag row of the complement block */
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            diag_j[nnz] = A_diag_j[j];
            diag_a[nnz++] = A_diag_a[j];
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            offd_a[nnz++] = A_offd_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */
   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A21_csr;
   hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
   hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
   hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the matrix
 * ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
   hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A );
   hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A );

   return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixAminvDB
 * computes C = (A - inv(D)B) where D is a diagonal matrix
 * Note: Data structure of A is expected to be a subset of data structure of B!
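 * (entrywise C_ij = A_ij - B_ij / d_i, with d the diagonal of D; the
 *  "subset" requirement means every nonzero position of A also occurs
 *  in B's sparsity pattern, which is what allows C to be assembled
 *  into a clone of B below)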
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_ParCSRMatrix *C = NULL; HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_sends_B, num_recvs_B; HYPRE_Int i, j, cnt; HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_offd = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs_B; HYPRE_Int *send_procs_B; HYPRE_Int *recv_vec_starts_B; HYPRE_Int *send_map_starts_B; HYPRE_Int *send_map_elmts_B; hypre_ParCSRCommPkg *comm_pkg_C; HYPRE_Int *recv_procs_C; HYPRE_Int *send_procs_C; HYPRE_Int *recv_vec_starts_C; HYPRE_Int *send_map_starts_C; HYPRE_Int *send_map_elmts_C; HYPRE_Int *map_to_B; /*HYPRE_Int *C_diag_array; HYPRE_Int *C_offd_array;*/ HYPRE_Complex *D_tmp; HYPRE_Int size, rest, num_threads, ii; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for B, a CommPkg is generated *--------------------------------------------------------------------*/ if (!comm_pkg_B) { hypre_MatvecCommPkgCreate(B); comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); } C = hypre_ParCSRMatrixClone(B, 0); /*hypre_ParCSRMatrixInitialize(C);*/ C_diag = hypre_ParCSRMatrixDiag(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); size = num_rows/num_threads; rest = num_rows - size*num_threads; D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_A; i++) { while (col_map_offd_B[cnt] < col_map_offd_A[i]) { cnt++; } map_to_B[i] = cnt; cnt++; } } #ifdef HYPRE_USING_OPENMP 
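/* Rows are split into num_threads nearly equal blocks by hand: thread ii
   gets rows [ns, ne), and the first `rest` threads take one extra row
   (e.g. num_rows = 10, num_threads = 3 gives [0,4), [4,7), [7,10)). */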
#pragma omp parallel for private(ii, i, j) #endif for (ii=0; ii < num_threads; ii++) { HYPRE_Int *A_marker = NULL; HYPRE_Int ns, ne, A_col, num_cols, nmax; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } nmax = hypre_max(num_rows, num_cols_offd_B); A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) { A_marker[i] = -1; } for (i = ns; i < ne; i++) { D_tmp[i] = 1.0/d[i]; } num_cols = C_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_col = A_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = A_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] += A_diag_data[j]; } } for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++) { A_col = B_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j]; } } } for (i = 0; i < num_cols_offd_B; i++) { A_marker[i] = -1; } num_cols = C_offd_i[ns]; for (i = ns; i < ne; i++) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_col = map_to_B[A_offd_j[j]]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = A_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] += A_offd_data[j]; } } for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++) { A_col = B_offd_j[j]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j]; } } } hypre_TFree(A_marker, HYPRE_MEMORY_HOST); } /* end parallel region */ /*for (i=0; i < num_cols_offd_B; i++) col_map_offd_C[i] = col_map_offd_B[i]; */ num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B); num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B); recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B); recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B); send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B); send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B); send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B); recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST); recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST); send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST); send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST); send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST); for (i=0; i < num_recvs_B; i++) recv_procs_C[i] = recv_procs_B[i]; for (i=0; i < num_recvs_B+1; i++) recv_vec_starts_C[i] = recv_vec_starts_B[i]; for (i=0; i < num_sends_B; i++) send_procs_C[i] = send_procs_B[i]; for (i=0; i < num_sends_B+1; i++) send_map_starts_C[i] = send_map_starts_B[i]; for (i=0; i < send_map_starts_B[num_sends_B]; i++) send_map_elmts_C[i] = send_map_elmts_B[i]; comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_C) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B; hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C; hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B; hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = 
send_procs_C; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C; hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C; hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C; hypre_TFree(D_tmp, HYPRE_MEMORY_HOST); if (num_cols_offd_A) hypre_TFree(map_to_B, HYPRE_MEMORY_HOST); *C_ptr = C; return (hypre_error_flag); } /*-------------------------------------------------------------------------- * hypre_ParTMatmul: * * Multiplies two ParCSRMatrices transpose(A) and B and returns * the product in ParCSRMatrix C * * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_BigInt first_col_diag_C; HYPRE_BigInt last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_BigInt *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_BigInt *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_BigInt nrows_A, ncols_A; HYPRE_BigInt nrows_B, ncols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_BigInt value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_BigInt first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = 
hypre_NumThreads(); if (nrows_A != nrows_B || num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; void *request; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request); C_ext = hypre_ExchangeExternalRowsWait(request); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixBigJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0; } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { temp[C_ext_offd_size++] = C_ext_j[j]; } else { C_ext_diag_size++; } } C_ext_diag_i[i+1] = C_ext_diag_size; C_ext_offd_i[i+1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { hypre_BigQsort0(temp,0,cnt-1); value = temp[0]; num_cols_offd_C = 1; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; 
temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST); } C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C); C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute: * C_diag = C_tmp_diag + C_ext_diag * C_offd = C_tmp_offd + C_ext_offd * * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A/num_threads; rest = num_cols_diag_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; 
j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } } } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads-1; ik++) { C_diag_array[ik+1] += C_diag_array[ik]; C_offd_array[ik+1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads-1]; nnz_o = C_offd_array[num_threads-1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * Now fill in values *-----------------------------------------------------------------------*/ for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } /*----------------------------------------------------------------------- * Populate matrices *-----------------------------------------------------------------------*/ nnz_d = 0; nnz_o = 0; if (ii) { nnz_d = C_diag_array[ii-1]; nnz_o = C_offd_array[ii-1]; } for (ik = ns; ik < ne; ik++) { C_diag_i[ik] = nnz_d; C_offd_i[ik] = nnz_o; for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_tmp_diag_data[jk]; B_marker[jcol] = nnz_d; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_tmp_offd_data[jk]; B_marker_offd[jcol] = nnz_o; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < C_diag_i[ik]) { C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_ext_diag_data[j2]; B_marker[jcol] = nnz_d; nnz_d++; } else { C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2]; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < C_offd_i[ik]) { C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_ext_offd_data[j2]; B_marker_offd[jcol] = nnz_o; nnz_o++; } else { C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2]; } } break; } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST); } /*C = hypre_ParCSRMatrixCreate(comm, ncols_A, ncols_B, col_starts_A, col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd); 
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */ /* row_starts[0] is start of local rows. row_starts[1] is start of next processor's rows */ first_row_index = col_starts_A[0]; local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index ); first_col_diag = col_starts_B[0]; local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = comm; hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A; hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B; hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index; hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(C) = NULL; hypre_ParCSRMatrixAssumedPartition(C) = NULL; hypre_ParCSRMatrixRowStarts(C) = col_starts_A; hypre_ParCSRMatrixColStarts(C) = col_starts_B; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(C) = 1; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C,0); hypre_ParCSRMatrixSetColStartsOwner(C,0); if (C_diag) { hypre_CSRMatrixSetRownnz(C_diag); hypre_ParCSRMatrixDiag(C) = C_diag; } else { hypre_ParCSRMatrixDiag(C) = C_tmp_diag; } if (C_offd) { hypre_CSRMatrixSetRownnz(C_offd); hypre_ParCSRMatrixOffd(C) = C_offd; } else { hypre_ParCSRMatrixOffd(C) = C_tmp_offd; } hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C; if (num_cols_offd_C) { HYPRE_Int jj_count_offd, nnz_offd; HYPRE_BigInt *new_col_map_offd_C = NULL; P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd_C; i++) { P_marker[i] = -1; } jj_count_offd = 0; nnz_offd = C_offd_i[num_cols_diag_A]; for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; if (P_marker[i1]) { P_marker[i1] = 0; jj_count_offd++; } } if (jj_count_offd < num_cols_offd_C) { new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_count_offd = 0; for (i = 0; i < num_cols_offd_C; i++) { if (!P_marker[i]) { P_marker[i] = jj_count_offd; new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i]; } } for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; C_offd_j[i] = P_marker[i1]; } num_cols_offd_C = jj_count_offd; hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST); col_map_offd_C = new_col_map_offd_C; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST); } if (C_ext_diag_size) { hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } if (C_diag) { 
      hypre_CSRMatrixDestroy(C_tmp_diag);
   }
   if (C_offd)
   {
      hypre_CSRMatrixDestroy(C_tmp_offd);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE )
   {
      hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C));
      hypre_SyncCudaComputeStream(hypre_handle());
   }
#endif

   return C;
}

HYPRE_Int
hypre_ParvecBdiagInvScal( hypre_ParVector     *b,
                          HYPRE_Int            blockSize,
                          hypre_ParVector    **bs,
                          hypre_ParCSRMatrix  *A)
{
   MPI_Comm comm = hypre_ParVectorComm(b);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int    i, j, s;
   HYPRE_BigInt block_start, block_end;
   HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt first_row   = hypre_ParVectorFirstIndex(b);
   HYPRE_BigInt last_row    = hypre_ParVectorLastIndex(b);
   HYPRE_BigInt end_row     = last_row + 1; /* one past-the-last */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );

   hypre_assert(blockSize == A->bdiag_size);
   HYPRE_Complex       *bdiaginv = A->bdiaginv;
   hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;
   HYPRE_Complex       *dense    = bdiaginv;

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);

   /* local vector of b */
   hypre_Vector  *b_local      = hypre_ParVectorLocalVector(b);
   HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
   /* number of sends (#procs) */
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(part, hypre_ParVectorPartitioning(b), HYPRE_BigInt, 2,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b),
                                                  hypre_ParVectorGlobalSize(b), part );
   hypre_ParVectorInitialize(bnew);
   hypre_Vector  *bnew_local      = hypre_ParVectorLocalVector(bnew);
   HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);

   /* send and recv b */
   HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
   HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_rows_send; i++)
   {
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_b[i] = b_local_data[j];
   }
   comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
   /* ... */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start);
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         bnew_local_data[local_i] = 0.0;
         for (j = 0; j < s; j++)
         {
            HYPRE_BigInt  global_rid = block_start + (HYPRE_BigInt)j;
            HYPRE_Complex val = dense[block_i + j*blockSize];
            if (val == 0.0)
            {
               continue;
            }
            if (global_rid >= first_row && global_rid < end_row)
            {
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               bnew_local_data[local_i] += val * b_local_data[rid];
            }
            else
            {
               HYPRE_Int rid;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }
               bnew_local_data[local_i] += val * recv_b[rid];
            }
         }
      }
      dense += blockSize * blockSize;
   }

   hypre_TFree(send_b, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_b, HYPRE_MEMORY_HOST);

   *bs = bnew;

   return hypre_error_flag;
}

/**
 * @brief Compute As = B^{-1}*A, where B is the block diagonal of A
 * @param[in]  A         input matrix
 * @param[in]  blockSize block size
 * @param[out] As        output matrix B^{-1}*A
 * @return
 * @warning
 */
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix  *A,
                          HYPRE_Int            blockSize,
                          hypre_ParCSRMatrix **As)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int    i, j, k, s;
   HYPRE_BigInt block_start, block_end;

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int     num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);

   HYPRE_Int    nrow_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt first_row  = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt last_row   = hypre_ParCSRMatrixLastRowIndex(A);
   HYPRE_BigInt end_row    = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */

   HYPRE_Int    ncol_local = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_BigInt first_col  = hypre_ParCSRMatrixFirstColDiag(A);
   /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */
   HYPRE_BigInt end_col    = first_col + (HYPRE_BigInt)ncol_local;

   HYPRE_BigInt  nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt  ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt *row_starts  = hypre_ParCSRMatrixRowStarts(A);
   void         *request;

   /* if square globally and locally */
   HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) &&
                       (first_row == first_col);

   if (nrow_global != ncol_global)
   {
      hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n");
      return hypre_error_flag;
   }

   /* in block diagonals, the row range of the blocks this proc spans */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block   = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
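   /*
    * Worked example of the two formulas above (the numbers are illustrative,
    * not taken from the original source): with blockSize = 3, first_row = 4
    * and last_row = 8,
    *    first_row_block = (4 / 3) * 3                        = 3
    *    end_row_block   = min((8 / 3 + 1) * 3, nrow_global)  = 9,
    * i.e. this process touches the full blocks [3, 6) and [6, 9) of the
    * global block diagonal, although it only owns rows [4, 9).
    */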
   HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 -
                                      first_row / (HYPRE_BigInt)blockSize);

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);
   //return 0;

   /* number of external rows */
   HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row));
   HYPRE_BigInt *ext_indices;
   HYPRE_Int A_ext_nnz;

   hypre_CSRMatrix *A_ext   = NULL;
   HYPRE_Complex   *A_ext_a = NULL;
   HYPRE_Int       *A_ext_i = NULL;
   HYPRE_BigInt    *A_ext_j = NULL;

   HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Real *dense = dense_all;
   HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST);
   HYPRE_Complex *dgetri_work = NULL;
   HYPRE_Int dgetri_lwork = -1, lapack_info;

   HYPRE_Int num_cols_A_offd_new;
   HYPRE_BigInt *col_map_offd_A_new;
   HYPRE_BigInt big_i;
   HYPRE_Int *offd2new = NULL;
   HYPRE_Int *marker_diag, *marker_newoffd;

   HYPRE_Int nnz_diag = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd = A_offd_i[nrow_local];
   HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0;
   HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new;
   HYPRE_Complex *A_diag_a_new, *A_offd_a_new;
   /* heuristic */
   HYPRE_Int nnz_diag_alloc = 2 * nnz_diag;
   HYPRE_Int nnz_offd_alloc = 2 * nnz_offd;

   A_diag_i_new = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1, HYPRE_MEMORY_HOST);
   A_diag_j_new = hypre_CTAlloc(HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
   A_offd_i_new = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_CTAlloc(HYPRE_Int,     nnz_offd_alloc, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);

   hypre_ParCSRMatrix *Anew;
   hypre_CSRMatrix *Anew_diag;
   hypre_CSRMatrix *Anew_offd;
   HYPRE_BigInt *row_starts_new, *col_starts_new;

   HYPRE_Real eps = 2.2e-16;

   /* Start with extracting the external rows */
   HYPRE_BigInt *ext_offd;
   ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST);
   j = 0;
   for (big_i = first_row_block; big_i < first_row; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   for (big_i = end_row; big_i < end_row_block; big_i++)
   {
      ext_indices[j++] = big_i;
   }
   hypre_assert(j == num_ext_rows);

   /* create CommPkg for external rows */
   hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts,
                                 hypre_ParCSRMatrixAssumedPartition(A),
                                 num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg);

   hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request);
   A_ext = hypre_ParcsrGetExternalRowsWait(request);

   hypre_TFree(ext_indices, HYPRE_MEMORY_HOST);

   A_ext_i = hypre_CSRMatrixI(A_ext);
   A_ext_j = hypre_CSRMatrixBigJ(A_ext);
   A_ext_a = hypre_CSRMatrixData(A_ext);
   A_ext_nnz = A_ext_i[num_ext_rows];
   ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST);

   /* find the offd indices in A_ext */
   for (i = 0, j = 0; i < A_ext_nnz; i++)
   {
      /* global index */
      HYPRE_BigInt cid = A_ext_j[i];
      /* keep the offd indices */
      if (cid < first_col || cid >= end_col)
      {
         ext_offd[j++] = cid;
      }
   }
   /* remove duplicates after sorting (TODO better ways?) */
   hypre_BigQsort0(ext_offd, 0, j-1);
   for (i = 0, k = 0; i < j; i++)
   {
      if (i == 0 || ext_offd[i] != ext_offd[i-1])
      {
         ext_offd[k++] = ext_offd[i];
      }
   }
   /* union these `k' new indices into col_map_offd_A */
   col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST);
   if (k)
   {
      /* map offd to offd_new */
      offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }
   hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd,
                &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL);
   hypre_TFree(ext_offd, HYPRE_MEMORY_HOST);

   /* adjust column indices in A_ext */
   for (i = 0; i < A_ext_nnz; i++)
   {
      HYPRE_BigInt cid = A_ext_j[i];
      if (cid < first_col || cid >= end_col)
      {
         j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new);
         /* searching must succeed */
         hypre_assert(j >= 0 && j < num_cols_A_offd_new);
         /* trick: save ncol_local + j back */
         A_ext_j[i] = ncol_local + j;
      }
      else
      {
         /* save local index: [0, ncol_local-1] */
         A_ext_j[i] = cid - first_col;
      }
   }

   /* marker for diag */
   marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   /* marker for newoffd */
   marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }

   /* outermost loop over blocks */
   for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start);

      /* 1. fill the dense block diag matrix */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         /* row index i: it can be local or external */
         if (big_i >= first_row && big_i < end_row)
         {
            /* is a local row */
            j = (HYPRE_Int)(big_i - first_row);
            for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
            {
               HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col;
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k];
               }
            }
            if (num_cols_A_offd)
            {
               for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
               {
                  HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]];
                  if (cid >= block_start && cid < block_end)
                  {
                     dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k];
                  }
               }
            }
         }
         else
         {
            /* is an external row */
            if (big_i < first_row)
            {
               j = (HYPRE_Int)(big_i - first_row_block);
            }
            else
            {
               j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row);
            }
            for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++)
            {
               HYPRE_BigInt cid = A_ext_j[k];
               /* recover the global index */
               cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local];
               if (cid >= block_start && cid < block_end)
               {
                  dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k];
               }
            }
         }
      }

      /* 2. invert the dense matrix */
      hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info);
      hypre_assert(lapack_info == 0);
      if (lapack_info == 0)
      {
         HYPRE_Int query = -1;
         HYPRE_Real lwork_opt;
         /* query the optimal size of work */
         hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info);
         hypre_assert(lapack_info == 0);
         if (lwork_opt > dgetri_lwork)
         {
            dgetri_lwork = lwork_opt;
            dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST);
         }
         hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info);
         hypre_assert(lapack_info == 0);
      }

      /* filter out *zeros* */
      HYPRE_Real Fnorm = 0.0;
      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            HYPRE_Complex t = dense[j+i*blockSize];
            Fnorm += t * t;
         }
      }
      Fnorm = sqrt(Fnorm);
      for (i = 0; i < s; i++)
      {
         for (j = 0; j < s; j++)
         {
            if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm )
            {
               dense[j+i*blockSize] = 0.0;
            }
         }
      }

      /* 3. premultiplication: one-pass dynamic allocation */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* starting points of this row in j */
         HYPRE_Int diag_i_start = nnz_diag_new;
         HYPRE_Int offd_i_start = nnz_offd_new;
         /* compute a new row with global index 'i' and local index 'local_i' */
         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         /* row index in this block */
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }
         /* if square^2: reserve the first space in the diag part for the diag entry */
         if (square2)
         {
            marker_diag[local_i] = nnz_diag_new;
            if (nnz_diag_new == nnz_diag_alloc)
            {
               nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
               A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
               A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
            }
            A_diag_j_new[nnz_diag_new] = local_i;
            A_diag_a_new[nnz_diag_new] = 0.0;
            nnz_diag_new++;
         }
         /* combine s rows */
         for (j = 0; j < s; j++)
         {
            /* row to combine: global row id */
            HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            /* the multiplier */
            HYPRE_Complex val = dense[block_i + j*blockSize];
            if (val == 0.0)
            {
               continue;
            }
            if (global_rid >= first_row && global_rid < end_row)
            {
               /* this row is local */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               HYPRE_Int ii;
               for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_diag_j[ii];
                  HYPRE_Complex vv = A_diag_a[ii];
                  if (marker_diag[col] < diag_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_diag[col] = nnz_diag_new;
                     if (nnz_diag_new == nnz_diag_alloc)
                     {
                        nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                        A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_diag_j_new[nnz_diag_new] = col;
                     A_diag_a_new[nnz_diag_new] = val * vv;
                     nnz_diag_new++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_diag[col];
                     hypre_assert(A_diag_j_new[p] == col);
                     A_diag_a_new[p] += val * vv;
                  }
               }
               for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++)
               {
                  HYPRE_Int col = A_offd_j[ii];
                  /* use the mapper to map to new offd */
                  HYPRE_Int col_new = offd2new ? offd2new[col] : col;
                  HYPRE_Complex vv = A_offd_a[ii];
                  if (marker_newoffd[col_new] < offd_i_start)
                  {
                     /* this col has not been seen before, create new entry */
                     marker_newoffd[col_new] = nnz_offd_new;
                     if (nnz_offd_new == nnz_offd_alloc)
                     {
                        nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                        A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int,     nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                     }
                     A_offd_j_new[nnz_offd_new] = col_new;
                     A_offd_a_new[nnz_offd_new] = val * vv;
                     nnz_offd_new++;
                  }
                  else
                  {
                     /* existing entry, update */
                     HYPRE_Int p = marker_newoffd[col_new];
                     hypre_assert(A_offd_j_new[p] == col_new);
                     A_offd_a_new[p] += val * vv;
                  }
               }
            }
            else
            {
               /* this is an external row: go to A_ext */
               HYPRE_Int rid, ii;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }
               for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++)
               {
                  HYPRE_Int col = (HYPRE_Int)A_ext_j[ii];
                  HYPRE_Complex vv = A_ext_a[ii];
                  if (col < ncol_local)
                  {
                     /* in diag part */
                     if (marker_diag[col] < diag_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_diag[col] = nnz_diag_new;
                        if (nnz_diag_new == nnz_diag_alloc)
                        {
                           nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
                           A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_alloc, HYPRE_MEMORY_HOST);
                           A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_diag_j_new[nnz_diag_new] = col;
                        A_diag_a_new[nnz_diag_new] = val * vv;
                        nnz_diag_new++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_diag[col];
                        hypre_assert(A_diag_j_new[p] == col);
                        A_diag_a_new[p] += val * vv;
                     }
                  }
                  else
                  {
                     /* in offd part */
                     col -= ncol_local;
                     if (marker_newoffd[col] < offd_i_start)
                     {
                        /* this col has not been seen before, create new entry */
                        marker_newoffd[col] = nnz_offd_new;
                        if (nnz_offd_new == nnz_offd_alloc)
                        {
                           nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
                           A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int,     nnz_offd_alloc, HYPRE_MEMORY_HOST);
                           A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
                        }
                        A_offd_j_new[nnz_offd_new] = col;
                        A_offd_a_new[nnz_offd_new] = val * vv;
                        nnz_offd_new++;
                     }
                     else
                     {
                        /* existing entry, update */
                        HYPRE_Int p = marker_newoffd[col];
                        hypre_assert(A_offd_j_new[p] == col);
                        A_offd_a_new[p] += val * vv;
                     }
                  }
               }
            }
         }
         /* done for row local_i */
         A_diag_i_new[local_i + 1] = nnz_diag_new;
         A_offd_i_new[local_i + 1] = nnz_offd_new;
      } /* for i, each row */
      dense += blockSize * blockSize;
   } /* for each block */

   /* done with all rows */
   /* resize properly */
   A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int,     nnz_diag_new, HYPRE_MEMORY_HOST);
   A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST);
   A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int,     nnz_offd_new, HYPRE_MEMORY_HOST);
   A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST);

   /* readjust col_map_offd_new */
   for (i = 0; i < num_cols_A_offd_new; i++)
   {
      marker_newoffd[i] = -1;
   }
   for (i = 0; i < nnz_offd_new; i++)
   {
      j = A_offd_j_new[i];
      if (marker_newoffd[j] == -1)
      {
         marker_newoffd[j] = 1;
      }
   }
   for (i = 0, j = 0; i < num_cols_A_offd_new; i++)
   {
      if (marker_newoffd[i] == 1)
      {
         col_map_offd_A_new[j] = col_map_offd_A_new[i];
         marker_newoffd[i] = j++;
      }
   }
   num_cols_A_offd_new = j;
   for (i = 0; i < nnz_offd_new; i++)
   {
      j = marker_newoffd[A_offd_j_new[i]];
      hypre_assert(j >= 0 && j < num_cols_A_offd_new);
      A_offd_j_new[i] = j;
   }

   /* the row/col starts arrays carry two entries:
    * the first local row/col and the one past the last */
   row_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   /* Now, we should have everything of Parcsr matrix As */
   Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global,
                                   row_starts_new, col_starts_new,
                                   num_cols_A_offd_new, nnz_diag_new, nnz_offd_new);

   Anew_diag = hypre_ParCSRMatrixDiag(Anew);
   hypre_CSRMatrixData(Anew_diag) = A_diag_a_new;
   hypre_CSRMatrixI(Anew_diag)    = A_diag_i_new;
   hypre_CSRMatrixJ(Anew_diag)    = A_diag_j_new;

   Anew_offd = hypre_ParCSRMatrixOffd(Anew);
   hypre_CSRMatrixData(Anew_offd) = A_offd_a_new;
   hypre_CSRMatrixI(Anew_offd)    = A_offd_i_new;
   hypre_CSRMatrixJ(Anew_offd)    = A_offd_j_new;

   hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new;

   hypre_ParCSRMatrixSetNumNonzeros(Anew);
   hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew);
   //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new);

   /* create CommPkg of Anew */
   hypre_MatvecCommPkgCreate(Anew);

   *As = Anew;

   /*
   if (bdiaginv)
   {
      *bdiaginv = dense_all;
   }
   else
   {
      hypre_TFree(dense_all, HYPRE_MEMORY_HOST);
   }
   */
   /* save diagonal blocks in A */
   A->bdiag_size = blockSize;
   A->bdiaginv = dense_all;

   /* free workspace */
   hypre_TFree(IPIV, HYPRE_MEMORY_HOST);
   hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST);
   hypre_TFree(offd2new, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixDestroy(A_ext);

   return hypre_error_flag;
}

HYPRE_Int
hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix   *A,
                                 HYPRE_Int             indices_len,
                                 HYPRE_BigInt         *indices,
                                 hypre_ParCSRCommPkg  *comm_pkg,
                                 HYPRE_Int             want_data,
                                 void                **request_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i,
             num_recvs, num_rows_recv, num_nnz_recv, *recv_i,
             *send_jstarts, *recv_jstarts, *send_i_offset;
   HYPRE_BigInt *send_j, *recv_j;
   HYPRE_Complex *send_a = NULL, *recv_a = NULL;
   hypre_ParCSRCommPkg *comm_pkg_j;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;

   /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_BigInt first_row  = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_BigInt  first_col      = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs;
   HYPRE_Int my_id;
   void **vrequest;
   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   /* fill the send array with row lengths */
   for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
   {
      /* j: row index to send */
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j];
      num_nnz_send += send_i[i];
   }
   /* send this array out: note the shift in recv_i by one (async) */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   /* prepare data to send out. overlap with the above communication */
   send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
   }

   send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
   send_i_offset[0] = 0;
   hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   /* prefix sum. TODO: OMP parallelization */
   for (i = 1; i <= num_rows_send; i++)
   {
      send_i_offset[i] += send_i_offset[i-1];
   }
   hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i <= num_sends; i++)
   {
      send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* fill the CSR matrix: j and a */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
   for (i = 0; i < num_rows_send; i++)
   {
      HYPRE_Int i1 = send_i_offset[i];
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      /* open row j and fill ja and a to send */
      for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
      {
         send_j[i1] = first_col + A_diag_j[k];
         if (want_data)
         {
            send_a[i1] = A_diag_a[k];
         }
         i1++;
      }
      if (num_procs > 1)
      {
         for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
         {
            send_j[i1] = col_map_offd_A[A_offd_j[k]];
            if (want_data)
            {
               send_a[i1] = A_offd_a[k];
            }
            i1++;
         }
      }
      hypre_assert(send_i_offset[i+1] == i1);
   }

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs */
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];
   recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
   }
   recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)          = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)      = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j)     = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs
(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a); } else { comm_handle_a = NULL; } /* create A_ext */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI (A_ext) = recv_i; hypre_CSRMatrixBigJ(A_ext) = recv_j; hypre_CSRMatrixData(A_ext) = recv_a; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j); if (comm_handle_a) { HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_a, HYPRE_MEMORY_HOST); } hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(send_j, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixAdd: performs C = alpha*A + beta*B * * A and B are assumed to have the same row and column partitionings *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { /* ParCSRMatrix data */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *rownnz_diag_A = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd; /* 
diag part of B */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag); HYPRE_Int num_rownnz_diag_B = hypre_CSRMatrixNumRownnz(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); /* off-diag part of B */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd); HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd); HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int *B2C_offd; /* C data */ hypre_ParCSRMatrix *C; HYPRE_BigInt *row_starts_C; HYPRE_BigInt *col_starts_C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *C_diag_i, *C_offd_i; HYPRE_Int *rownnz_diag_C = NULL; HYPRE_Int *rownnz_offd_C = NULL; HYPRE_Int num_rownnz_diag_C; HYPRE_Int num_rownnz_offd_C; HYPRE_Int num_rows_diag_C = num_rows_diag_A; HYPRE_Int num_cols_diag_C = num_cols_diag_A; HYPRE_Int num_rows_offd_C = num_rows_offd_A; HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); HYPRE_ANNOTATE_FUNC_BEGIN; hypre_assert(num_rows_A == num_rows_B); hypre_assert(num_cols_A == num_cols_B); hypre_assert(num_rows_diag_A == num_rows_diag_B); hypre_assert(num_cols_diag_A == num_cols_diag_B); /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C); col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); /* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/ A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_union2(num_cols_offd_A, col_map_offd_A, num_cols_offd_B, col_map_offd_B, &num_cols_offd_C, col_map_offd_C, A2C_offd, B2C_offd); /* Set nonzero rows data of diag_C */ num_rownnz_diag_C = num_rows_diag_A; if ((num_rownnz_diag_A < num_rows_diag_A) && (num_rownnz_diag_B < num_rows_diag_B)) { hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A, num_rownnz_diag_B, rownnz_diag_B, &num_rownnz_diag_C, &rownnz_diag_C); } /* Set nonzero rows data of offd_C */ num_rownnz_offd_C = num_rows_offd_A; if ((num_rownnz_offd_A < num_rows_offd_A) && (num_rownnz_offd_B < num_rows_offd_B)) { hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A, num_rownnz_offd_B, rownnz_offd_B, &num_rownnz_offd_C, &rownnz_offd_C); } /* Set diag_C */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ii, num_threads; HYPRE_Int size, rest, ns, ne; HYPRE_Int *marker_diag; HYPRE_Int *marker_offd; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); 
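      /*
       * The ns/ne ranges computed below follow the balanced thread partition
       * used throughout this file.  Illustrative example (hypothetical
       * numbers, not from the original source): with num_rownnz_diag_C = 10
       * and num_threads = 4, size = 2 and rest = 2, so threads 0..3 receive
       * the ranges [0,3), [3,6), [6,8) and [8,10); the first `rest' threads
       * get one extra row each.
       */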
/*----------------------------------------------------------------------- * Compute C_diag = alpha*A_diag + beta*B_diag *-----------------------------------------------------------------------*/ size = num_rownnz_diag_C/num_threads; rest = num_rownnz_diag_C - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_diag, NULL, NULL, A_diag, B_diag, num_rows_diag_C, num_rownnz_diag_C, num_cols_diag_C, rownnz_diag_C, memory_location_C, C_diag_i, &C_diag); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_diag, NULL, NULL, rownnz_diag_C, alpha, beta, A_diag, B_diag, C_diag); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Compute C_offd = alpha*A_offd + beta*B_offd *-----------------------------------------------------------------------*/ size = num_rownnz_offd_C/num_threads; rest = num_rownnz_offd_C - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, A_offd, B_offd, num_rows_offd_C, num_rownnz_offd_C, num_cols_offd_C, rownnz_offd_C, memory_location_C, C_offd_i, &C_offd); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, rownnz_offd_C, alpha, beta, A_offd, B_offd, C_offd); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); } /* end of omp parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST); hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST); /* Create ParCSRMatrix C */ row_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); C = hypre_ParCSRMatrixCreate(comm, num_rows_A, num_cols_A, row_starts_C, col_starts_C, num_cols_offd_C, hypre_CSRMatrixNumNonzeros(C_diag), hypre_CSRMatrixNumNonzeros(C_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); hypre_ParCSRMatrixDiag(C) = C_diag; hypre_ParCSRMatrixOffd(C) = C_offd; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *C_ptr = C; HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFnorm *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Real f_diag, f_offd, local_result, result; f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A)); f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A)); local_result = f_diag * f_diag + f_offd * f_offd; hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); return sqrt(result); } 
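/*
 * The next two routines form a split-phase (Init/Wait) exchange of external
 * matrix rows.  A minimal caller sketch, mirroring the use in
 * hypre_ParTMatmul above (variable names are illustrative only):
 *
 *    void *request;
 *    hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request);
 *    ... independent local work may overlap the communication here ...
 *    C_ext = hypre_ExchangeExternalRowsWait(request);
 *
 * Init posts the non-blocking sends/receives of the row data and returns;
 * Wait completes them and returns the received rows as a CSR matrix.
 */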
/*--------------------------------------------------------------------------
 * hypre_ExchangeExternalRowsInit
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix      *B_ext,
                                hypre_ParCSRCommPkg  *comm_pkg_A,
                                void                **request_ptr)
{
   MPI_Comm   comm            = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int  num_recvs       = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int  num_sends       = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);

   HYPRE_Int num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];

   HYPRE_Int     *B_ext_i      = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
   HYPRE_BigInt  *B_ext_j      = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
   HYPRE_Complex *B_ext_data   = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
   HYPRE_Int      B_ext_ncols  = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
   HYPRE_Int      B_ext_nrows  = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
   HYPRE_Int     *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int;
   HYPRE_Int      B_int_nrows = num_elmts_send;
   HYPRE_Int      B_int_ncols = B_ext_ncols;
   HYPRE_Int     *B_int_i     = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_BigInt  *B_int_j     = NULL;
   HYPRE_Complex *B_int_data  = NULL;
   HYPRE_Int      B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i;
   HYPRE_Int num_procs;
   void **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmts on the receiving end)
    *--------------------------------------------------------------------------*/
   for (i = 0; i < B_ext_nrows; i++)
   {
      B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i];
   }

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec)
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);

   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
   }

   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i[i] += B_int_i[i-1];
   }
   B_int_nnz =
B_int_i[B_int_nrows]; B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST); B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data); comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j); /* create CSR */ B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_int) = B_int_i; hypre_CSRMatrixBigJ(B_int) = B_int_j; hypre_CSRMatrixData(B_int) = B_int_data; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsWait *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_ExchangeExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractSubmatrixFC * * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC} * char job[2] = "FF", "FC", "CF" or "CC" *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts_in, const char *job, hypre_ParCSRMatrix **B_ptr, HYPRE_Real strength_thresh) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag, *B_offd; HYPRE_Real *B_maxel_row; HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j; HYPRE_Complex *B_diag_a, *B_offd_a; HYPRE_Int 
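/*
 * Usage sketch for the Init/Wait pair above (illustrative only, not part of
 * the library; assumes "A" is a hypre_ParCSRMatrix whose CommPkg exists and
 * "B_ext" holds the external rows to be returned to their owner processes):
 *
 *    void *request;
 *    hypre_CSRMatrix *B_int;
 *
 *    hypre_ExchangeExternalRowsInit(B_ext, hypre_ParCSRMatrixCommPkg(A), &request);
 *    // ...independent local work can overlap the communication here...
 *    B_int = hypre_ExchangeExternalRowsWait(request);
 *
 * Wait destroys both communication handles and the temporary comm package,
 * so B_int is safe to read afterwards.
 */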
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractSubmatrixFC
 *
 * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC}
 * char job[2] = "FF", "FC", "CF" or "CC"
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix  *A,
                                      HYPRE_Int           *CF_marker,
                                      HYPRE_BigInt        *cpts_starts_in,
                                      const char          *job,
                                      hypre_ParCSRMatrix **B_ptr,
                                      HYPRE_Real           strength_thresh )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   /* diag part of A */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag, *B_offd;
   HYPRE_Real *B_maxel_row;
   HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j;
   HYPRE_Complex *B_diag_a, *B_offd_a;
   HYPRE_Int num_cols_B_offd;
   HYPRE_BigInt *col_map_offd_B;
   HYPRE_Int i, j, k, k1, k2;
   HYPRE_BigInt B_nrow_global, B_ncol_global;
   HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd;
   HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts;
   HYPRE_Int nf_local, nc_local;
   HYPRE_Int row_set, col_set;
   HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col;
   HYPRE_Int my_id, num_procs, *sub_idx_diag, *sub_idx_offd;
   HYPRE_Int num_sends, *send_buf_data;

   /* MPI size and rank */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   row_set = job[0] == 'F' ? -1 : 1;
   col_set = job[1] == 'F' ? -1 : 1;

   A_nlocal = hypre_CSRMatrixNumRows(A_diag);

   /*-------------- global number of C points and local C points
    * assuming cpts_starts is given */
   if (row_set == 1 || col_set == 1)
   {
      /* copy cpts_starts first */
      HYPRE_Int len;
      len = 2;
      cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(cpts_starts, cpts_starts_in, HYPRE_BigInt, len,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      if (my_id == (num_procs - 1))
      {
         total_global_cpts = cpts_starts[1];
      }
      hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
      nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]);
   }

   /*-------------- global number of F points, local F points, and F starts */
   if (row_set == -1 || col_set == -1)
   {
      HYPRE_BigInt big_nf_local;
      nf_local = 0;
      for (i = 0; i < A_nlocal; i++)
      {
         if (CF_marker[i] < 0) { nf_local++; }
      }
      fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      big_nf_local = (HYPRE_BigInt) nf_local;
      hypre_MPI_Scan(&big_nf_local, fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
      fpts_starts[0] = fpts_starts[1] - nf_local;
      if (my_id == num_procs - 1)
      {
         total_global_fpts = fpts_starts[1];
      }
      hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   }

   if (row_set == -1 && col_set == -1)
   {
      /* FF */
      B_nrow_local = nf_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = B_col_starts = fpts_starts;
   }
   else if (row_set == -1 && col_set == 1)
   {
      /* FC */
      B_nrow_local = nf_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_fpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = fpts_starts;
      B_col_starts = cpts_starts;
   }
   else if (row_set == 1 && col_set == -1)
   {
      /* CF */
      B_nrow_local = nc_local;
      B_ncol_local = nf_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_fpts;
      B_row_starts = cpts_starts;
      B_col_starts = fpts_starts;
   }
   else
   {
      /* CC */
      B_nrow_local = nc_local;
      B_ncol_local = nc_local;
      B_nrow_global = total_global_cpts;
      B_ncol_global = total_global_cpts;
      B_row_starts = B_col_starts = cpts_starts;
   }

   /* global index of my first col */
   B_first_col = B_col_starts[0];

   /* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */
   sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i == col_set) { sub_idx_diag[i] = k++; }
      else { sub_idx_diag[i] = -1; }
   }
   hypre_assert(k == B_ncol_local);

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
   k = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1);
      /* loop through all elems to send_proc[i] */
      for (j = si; j < ei; j++)
      {
         /* j1: local idx */
         HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         if (j1 != -1)
         {
            /* adjust j1 to B global idx */
            j1 += B_first_col;
         }
         send_buf_data[k++] = j1;
      }
   }
   hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

   /* recv buffer */
   sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1) { num_cols_B_offd++; }
   }
   col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST);
   for (i = 0, k = 0; i < num_cols_A_offd; i++)
   {
      if (sub_idx_offd[i] != -1)
      {
         col_map_offd_B[k] = sub_idx_offd[i];
         sub_idx_offd[i] = k++;
      }
   }
   hypre_assert(k == num_cols_B_offd);

   /* count nnz and set ia */
   B_nnz_diag = B_nnz_offd = 0;
   B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST);
   B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local + 1, HYPRE_MEMORY_HOST);
   B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local + 1, HYPRE_MEMORY_HOST);
   B_diag_i[0] = B_offd_i[0] = 0;
   for (i = 0, k = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set) { continue; }
      k++;

      // Get max abs-value element of this row
      HYPRE_Real temp_max = 0;
      if (strength_thresh > 0)
      {
         for (j = A_diag_i[i] + 1; j < A_diag_i[i+1]; j++)
         {
            if (hypre_cabs(A_diag_a[j]) > temp_max) { temp_max = hypre_cabs(A_diag_a[j]); }
         }
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            if (hypre_cabs(A_offd_a[j]) > temp_max) { temp_max = hypre_cabs(A_offd_a[j]); }
         }
      }
      B_maxel_row[k-1] = temp_max;

      // add one for diagonal element
      j = A_diag_i[i];
      if (sub_idx_diag[A_diag_j[j]] != -1) { B_nnz_diag++; }

      // Count nnzs larger than tolerance times max row element
      for (j = A_diag_i[i] + 1; j < A_diag_i[i+1]; j++)
      {
         if ((sub_idx_diag[A_diag_j[j]] != -1) &&
             (hypre_cabs(A_diag_a[j]) > (strength_thresh * temp_max)))
         {
            B_nnz_diag++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         if ((sub_idx_offd[A_offd_j[j]] != -1) &&
             (hypre_cabs(A_offd_a[j]) > (strength_thresh * temp_max)))
         {
            B_nnz_offd++;
         }
      }
      B_diag_i[k] = B_nnz_diag;
      B_offd_i[k] = B_nnz_offd;
   }
   hypre_assert(k == B_nrow_local);

   B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST);
   B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST);
   B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST);

   for (i = 0, k = 0, k1 = 0, k2 = 0; i < A_nlocal; i++)
   {
      HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
      if (CF_i != row_set) { continue; }
      HYPRE_Real maxel = B_maxel_row[k];
      k++;
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]];
         if ((j1 != -1) &&
             ((hypre_cabs(A_diag_a[j]) > (strength_thresh * maxel)) || j == A_diag_i[i]))
         {
            B_diag_j[k1] = j1;
            B_diag_a[k1] = A_diag_a[j];
            k1++;
         }
      }
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]];
         if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh * maxel)))
         {
            hypre_assert(j1 >= 0 && j1 < num_cols_B_offd);
            B_offd_j[k2] = j1;
            B_offd_a[k2] = A_offd_a[j];
            k2++;
         }
      }
   }
   hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd);

   /* ready to create B = A(rowset, colset) */
   B = hypre_ParCSRMatrixCreate(comm, B_nrow_global, B_ncol_global,
                                B_row_starts, B_col_starts,
                                num_cols_B_offd, B_nnz_diag, B_nnz_offd);

   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_diag) = B_diag_a;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;

   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixData(B_offd) = B_offd_a;
   hypre_CSRMatrixI(B_offd) = B_offd_i;
   hypre_CSRMatrixJ(B_offd) = B_offd_j;

   hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;

   hypre_ParCSRMatrixSetNumNonzeros(B);
   hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B);

   hypre_MatvecCommPkgCreate(B);

   *B_ptr = B;

   hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST);
   hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
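/*
 * Usage sketch (illustrative only, not part of the library): extracting the
 * F-to-C coupling block of A given a CF splitting. Assumes CF_marker[i] > 0
 * marks C-points, < 0 marks F-points, and cpts_starts is the usual 2-entry
 * range of local C-points:
 *
 *    hypre_ParCSRMatrix *A_FC = NULL;
 *    hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_starts,
 *                                         "FC", &A_FC, 0.0);
 *    // strength_thresh = 0.0 keeps every entry of the submatrix; a value
 *    // t > 0 drops entries whose magnitude is below t times the row maximum.
 *    hypre_ParCSRMatrixDestroy(A_FC);
 */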
#include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /*-------------------------------------------------------------------------- * hypre_ParMatmul_RowSizes: * * Computes sizes of C rows. Formerly part of hypre_ParMatmul but removed * so it can also be used for multiplication of Boolean matrices. * * Arrays computed: C_diag_i, C_offd_i. * * Arrays needed: (17, all HYPRE_Int*) * rownnz_A, * A_diag_i, A_diag_j, * A_offd_i, A_offd_j, * B_diag_i, B_diag_j, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j, * col_map_offd_B, col_map_offd_B, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j. * * Scalars computed: C_diag_size, C_offd_size. * * Scalars needed: * num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, * first_col_diag_B, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C *--------------------------------------------------------------------------*/ void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location, HYPRE_Int **C_diag_i, HYPRE_Int **C_offd_i, HYPRE_Int *rownnz_A, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Int *A_offd_i, HYPRE_Int *A_offd_j, HYPRE_Int *B_diag_i, HYPRE_Int *B_diag_j, HYPRE_Int *B_offd_i, HYPRE_Int *B_offd_j, HYPRE_Int *B_ext_diag_i, HYPRE_Int *B_ext_diag_j, HYPRE_Int *B_ext_offd_i, HYPRE_Int *B_ext_offd_j, HYPRE_Int *map_B_to_C, HYPRE_Int *C_diag_size, HYPRE_Int *C_offd_size, HYPRE_Int num_rownnz_A, HYPRE_Int num_rows_diag_A, HYPRE_Int num_cols_offd_A, HYPRE_Int allsquare, HYPRE_Int num_cols_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_Int num_cols_offd_C ) { HYPRE_Int *jj_count_diag_array; HYPRE_Int *jj_count_offd_array; HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */ HYPRE_Int num_threads = hypre_NumThreads(); *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location); *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location); jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of A *-----------------------------------------------------------------------*/ { HYPRE_Int *B_marker = NULL; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int size, rest, num_threads; HYPRE_Int ii, ns, ne; num_threads = hypre_NumActiveThreads(); size = num_rownnz_A/num_threads; rest = num_rownnz_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { ii1 = i1; /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_offd. 
*-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++) { i2 = A_offd_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++) { i2 = A_diag_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of B_offd. *-----------------------------------------------------------*/ if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } } } /*-------------------------------------------------------------------- * Set C_diag_i and C_offd_i for this row. *--------------------------------------------------------------------*/ (*C_diag_i)[ii1] = jj_row_begin_diag; (*C_offd_i)[ii1] = jj_row_begin_offd; } jj_count_diag_array[ii] = jj_count_diag; jj_count_offd_array[ii] = jj_count_offd; hypre_TFree(B_marker, HYPRE_MEMORY_HOST); /* Correct diag_i and offd_i - phase 1 */ if (ii) { jj_count_diag = jj_count_diag_array[0]; jj_count_offd = jj_count_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { jj_count_diag += jj_count_diag_array[i1]; jj_count_offd += jj_count_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { ii1 = rownnz_A ? 
rownnz_A[i1] : i1; (*C_diag_i)[ii1] += jj_count_diag; (*C_offd_i)[ii1] += jj_count_offd; } } else { (*C_diag_i)[num_rows_diag_A] = 0; (*C_offd_i)[num_rows_diag_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1]; (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1]; } } /* Correct diag_i and offd_i - phase 2 */ if (rownnz_A != NULL) { for (i1 = ns; i1 < (ne-1); i1++) { for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1+1]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1+1]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1+1]]; } } if (ii < (num_threads - 1)) { for (ii1 = rownnz_A[ne-1] + 1; ii1 < rownnz_A[ne]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]]; } } else { for (ii1 = rownnz_A[ne-1] + 1; ii1 < num_rows_diag_A; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A]; (*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A]; } } } } /* end parallel loop */ *C_diag_size = (*C_diag_i)[num_rows_diag_A]; *C_offd_size = (*C_offd_i)[num_rows_diag_A]; #ifdef HYPRE_DEBUG HYPRE_Int i; for (i = 0; i < num_rows_diag_A; i++) { hypre_assert((*C_diag_i)[i] <= (*C_diag_i)[i+1]); hypre_assert((*C_offd_i)[i] <= (*C_offd_i)[i+1]); } #endif hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST); /* End of First Pass */ } /*-------------------------------------------------------------------------- * hypre_ParMatmul: * * Multiplies two ParCSRMatrices A and B and returns the product in * ParCSRMatrix C. * * Note: C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime(); #endif /* ParCSRMatrix A */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_rownnz_A; HYPRE_Int *rownnz_A = NULL; /* ParCSRMatrix B */ HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_BigInt last_col_diag_B; /* A_diag */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* A_offd */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); /* B_diag */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); 
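/*
 * On exit, (*C_diag_i)[i] is the offset of row i within the C_diag arrays
 * and (*C_diag_i)[num_rows_diag_A] equals *C_diag_size (likewise for offd),
 * i.e. the usual CSR row-pointer convention. A minimal sketch of how a
 * caller recovers the per-row count (illustrative only):
 *
 *    HYPRE_Int nnz_row_i = C_diag_i[i+1] - C_diag_i[i];  // entries of row i in C_diag
 */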
/*--------------------------------------------------------------------------
 * hypre_ParMatmul:
 *
 * Multiplies two ParCSRMatrices A and B and returns the product in
 * ParCSRMatrix C.
 *
 * Note: C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix*
hypre_ParMatmul( hypre_ParCSRMatrix *A,
                 hypre_ParCSRMatrix *B )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif

   /* ParCSRMatrix A */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int num_rownnz_A;
   HYPRE_Int *rownnz_A = NULL;

   /* ParCSRMatrix B */
   HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);
   HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
   HYPRE_BigInt last_col_diag_B;

   /* A_diag */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag);
   HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
   HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);

   /* A_offd */
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd);
   HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd);
   HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   /* B_diag */
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);

   /* B_offd */
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   /* ParCSRMatrix C */
   hypre_ParCSRMatrix *C;
   HYPRE_BigInt *col_map_offd_C;
   HYPRE_Int *map_B_to_C = NULL;

   /* C_diag */
   hypre_CSRMatrix *C_diag;
   HYPRE_Complex *C_diag_data;
   HYPRE_Int *C_diag_i;
   HYPRE_Int *C_diag_j;
   HYPRE_Int C_offd_size;
   HYPRE_Int num_cols_offd_C = 0;

   /* C_offd */
   hypre_CSRMatrix *C_offd;
   HYPRE_Complex *C_offd_data = NULL;
   HYPRE_Int *C_offd_i = NULL;
   HYPRE_Int *C_offd_j = NULL;
   HYPRE_Int C_diag_size;

   /* Bs_ext */
   hypre_CSRMatrix *Bs_ext;
   HYPRE_Complex *Bs_ext_data;
   HYPRE_Int *Bs_ext_i;
   HYPRE_BigInt *Bs_ext_j;
   HYPRE_Complex *B_ext_diag_data;
   HYPRE_Int *B_ext_diag_i;
   HYPRE_Int *B_ext_diag_j;
   HYPRE_Int B_ext_diag_size;
   HYPRE_Complex *B_ext_offd_data;
   HYPRE_Int *B_ext_offd_i;
   HYPRE_Int *B_ext_offd_j;
   HYPRE_BigInt *B_big_offd_j = NULL;
   HYPRE_Int B_ext_offd_size;

   HYPRE_Int allsquare = 0;
   HYPRE_Int num_procs;
   HYPRE_Int *my_diag_array;
   HYPRE_Int *my_offd_array;
   HYPRE_Int max_num_threads;

   HYPRE_Complex zero = 0.0;

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
      hypre_assert(memory_location_A == memory_location_B); */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

   if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Incompatible matrix dimensions!\n");
      return NULL;
   }

   /* if C=A*B is square globally and locally, then C_diag should be square also */
   if (num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B)
   {
      allsquare = 1;
   }

   /* Set rownnz of A */
   if (num_rownnz_diag_A != num_rows_diag_A &&
       num_rownnz_offd_A != num_rows_offd_A)
   {
      hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir,
                               num_rownnz_offd_A, A_offd_ir,
                               &num_rownnz_A, &rownnz_A);
   }
   else
   {
      num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A);
   }

   /*-----------------------------------------------------------------------
    * Extract B_ext, i.e. portion of B that is stored on neighbor procs
    * and needed locally for matrix matrix product
    *-----------------------------------------------------------------------*/
   hypre_MPI_Comm_size(comm, &num_procs);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif

   if (num_procs > 1)
   {
      /*---------------------------------------------------------------------
       * If there exists no CommPkg for A, a CommPkg is generated using
       * equally load balanced partitionings within
       * hypre_ParCSRMatrixExtractBExt
       *--------------------------------------------------------------------*/
      Bs_ext = hypre_ParCSRMatrixExtractBExt(B, A, 1);
      Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
      Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
      Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
   }

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST);
   B_ext_diag_size = 0;
   B_ext_offd_size = 0;
   last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1;

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_UnorderedBigIntSet set;

   HYPRE_Int size, rest, ii;
   HYPRE_Int ns, ne;
   HYPRE_Int i1, i, j;
   HYPRE_Int my_offd_size, my_diag_size;
   HYPRE_Int cnt_offd, cnt_diag;
   HYPRE_Int num_threads = hypre_NumActiveThreads();

   size = num_cols_offd_A / num_threads;
   rest = num_cols_offd_A - size * num_threads;
   ii = hypre_GetThreadNum();
   if (ii < rest)
   {
      ns = ii * size + ii;
      ne = (ii + 1) * size + ii + 1;
   }
   else
   {
      ns = ii * size + rest;
      ne = (ii + 1) * size + rest;
   }

   my_diag_size = 0;
   my_offd_size = 0;
   for (i = ns; i < ne; i++)
   {
      B_ext_diag_i[i] = my_diag_size;
      B_ext_offd_i[i] = my_offd_size;
      for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
      {
         if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
         {
            my_offd_size++;
         }
         else
         {
            my_diag_size++;
         }
      }
   }
   my_diag_array[ii] = my_diag_size;
   my_offd_array[ii] = my_offd_size;

   if (ii)
   {
      my_diag_size = my_diag_array[0];
      my_offd_size = my_offd_array[0];
      for (i1 = 1; i1 < ii; i1++)
      {
         my_diag_size += my_diag_array[i1];
         my_offd_size += my_offd_array[i1];
      }
      for (i1 = ns; i1 < ne; i1++)
      {
         B_ext_diag_i[i1] += my_diag_size;
         B_ext_offd_i[i1] += my_offd_size;
      }
   }
   else
   {
      B_ext_diag_size = 0;
      B_ext_offd_size = 0;
      for (i1 = 0; i1 < num_threads; i1++)
      {
         B_ext_diag_size += my_diag_array[i1];
         B_ext_offd_size += my_offd_array[i1];
      }
      B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
      B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;

      if (B_ext_diag_size)
      {
         B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
         B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
      }
      if (B_ext_offd_size)
      {
         B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
         B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
         B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
      }
      hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B,
                                     16 * hypre_NumThreads());
   }

   cnt_offd = B_ext_offd_i[ns];
   cnt_diag = B_ext_diag_i[ns];
   for (i = ns; i < ne; i++)
   {
      for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
      {
         if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
         {
            hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]);
            B_big_offd_j[cnt_offd] = Bs_ext_j[j];
            //Bs_ext_j[cnt_offd] = Bs_ext_j[j];
            B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
         }
         else
         {
            B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
            B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
         }
      }
   }

   HYPRE_Int i_begin, i_end;
   hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B);
   for (i = i_begin; i < i_end; i++)
   {
      hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]);
   }
   /* omp parallel */

   col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C);
   hypre_UnorderedBigIntSetDestroy(&set);

   hypre_UnorderedBigIntMap col_map_offd_C_inverse;
   hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C,
                                         &col_map_offd_C, &col_map_offd_C_inverse);

   for (i = 0; i < num_cols_offd_A; i++)
   {
      for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
      {
         //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);
         B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]);
      }
   }

   if (num_cols_offd_C)
   {
      hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse);
   }
   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   if (num_cols_offd_B)
   {
      HYPRE_Int i;
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C);

      HYPRE_Int cnt;
      if (i_end > i_begin)
      {
         cnt = hypre_BigLowerBound(col_map_offd_B,
                                   col_map_offd_B + (HYPRE_BigInt) num_cols_offd_B,
                                   col_map_offd_C[i_begin]) - col_map_offd_B;
      }
      for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++)
      {
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
         }
      }
   }
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Bs_ext);
      Bs_ext = NULL;
   }

#else /* !HYPRE_CONCURRENT_HOPSCOTCH */

   HYPRE_BigInt *temp;

   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;
      HYPRE_Int num_threads = hypre_NumActiveThreads();

      size = num_cols_offd_A / num_threads;
      rest = num_cols_offd_A - size * num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii * size + ii;
         ne = (ii + 1) * size + ii + 1;
      }
      else
      {
         ns = ii * size + rest;
         ne = (ii + 1) * size + rest;
      }

      my_diag_size = 0;
      my_offd_size = 0;
      for (i = ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }
         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
         B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_big_offd_j[cnt_offd] = Bs_ext_j[j];
               //Bs_ext_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      if (ii == 0)
      {
         HYPRE_Int cnt;

         if (num_procs > 1)
         {
            hypre_CSRMatrixDestroy(Bs_ext);
            Bs_ext = NULL;
         }

         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i = 0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }
            if (cnt)
            {
               HYPRE_BigInt value;
               hypre_BigQsort0(temp, 0, cnt - 1);
               num_cols_offd_C = 1;
               value = temp[0];
               for (i = 1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }
            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }
            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j],
            //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j],
                                                    num_cols_offd_C);
         }
      }
   } /* end parallel region */

   hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   if (num_cols_offd_B)
   {
      HYPRE_Int i, cnt;
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i = 0; i < num_cols_offd_C; i++)
      {
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
            if (cnt == num_cols_offd_B) break;
         }
      }
   }

#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif

   hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i, rownnz_A,
                            A_diag_i, A_diag_j, A_offd_i, A_offd_j,
                            B_diag_i, B_diag_j, B_offd_i, B_offd_j,
                            B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j,
                            map_B_to_C, &C_diag_size, &C_offd_size,
                            num_rownnz_A, num_rows_diag_A, num_cols_offd_A,
                            allsquare, num_cols_diag_B, num_cols_offd_B,
                            num_cols_offd_C);

   /*-----------------------------------------------------------------------
    * Allocate C_diag_data and C_diag_j arrays.
    * Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/
   last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1;
   C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C);
   C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C);
   if (C_offd_size)
   {
      C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C);
      C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C);
   }

   /*-----------------------------------------------------------------------
    * Second Pass: Fill in C_diag_data and C_diag_j.
    * Second Pass: Fill in C_offd_data and C_offd_j.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne, size, rest, ii;
      HYPRE_Int i1, ii1, i2, i3, jj2, jj3;
      HYPRE_Int jj_row_begin_diag, jj_count_diag;
      HYPRE_Int jj_row_begin_offd, jj_count_offd;
      HYPRE_Int num_threads;
      HYPRE_Complex a_entry; /*, a_b_product;*/

      num_threads = hypre_NumActiveThreads();
      size = num_rownnz_A / num_threads;
      rest = num_rownnz_A - size * num_threads;

      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii * size + ii;
         ne = (ii + 1) * size + ii + 1;
      }
      else
      {
         ns = ii * size + rest;
         ne = (ii + 1) * size + rest;
      }

      jj_count_diag = C_diag_i[rownnz_A ? rownnz_A[ns] : ns];
      jj_count_offd = C_offd_i[rownnz_A ? rownnz_A[ns] : ns];

      if (num_cols_diag_B || num_cols_offd_C)
      {
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C,
                                  HYPRE_MEMORY_HOST);
         for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
         {
            B_marker[i1] = -1;
         }
      }

      /*-----------------------------------------------------------------------
       * Loop over interior c-points.
       *-----------------------------------------------------------------------*/
      for (i1 = ns; i1 < ne; i1++)
      {
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if (rownnz_A)
         {
            ii1 = rownnz_A[i1];
         }
         else
         {
            ii1 = i1;

            /*--------------------------------------------------------------------
             * Create diagonal entry, C_{i1,i1}
             *--------------------------------------------------------------------*/
            if (allsquare)
            {
               B_marker[i1] = jj_count_diag;
               C_diag_data[jj_count_diag] = zero;
               C_diag_j[jj_count_diag] = i1;
               jj_count_diag++;
            }
         }

         /*-----------------------------------------------------------------
          * Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/
         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];
               a_entry = A_offd_data[jj2];

               /*-----------------------------------------------------------
                * Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/
               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B + B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   * Check B_marker to see that C_{ii1,i3} has not already
                   * been accounted for. If it has not, create a new entry.
                   * If it has, add new contribution.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry * B_ext_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3 - num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry * B_ext_offd_data[jj3];
                  }
               }
               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];
                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     C_diag_data[jj_count_diag] = a_entry * B_ext_diag_data[jj3];
                     C_diag_j[jj_count_diag] = i3;
                     jj_count_diag++;
                  }
                  else
                  {
                     C_diag_data[B_marker[i3]] += a_entry * B_ext_diag_data[jj3];
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          * Loop over entries in row ii1 of A_diag.
          *-----------------------------------------------------------------*/
         for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];
            a_entry = A_diag_data[jj2];

            /*-----------------------------------------------------------
             * Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/
            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                * Check B_marker to see that C_{ii1,i3} has not already
                * been accounted for. If it has not, create a new entry.
                * If it has, add new contribution.
                *--------------------------------------------------------*/
               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  C_diag_data[jj_count_diag] = a_entry * B_diag_data[jj3];
                  C_diag_j[jj_count_diag] = i3;
                  jj_count_diag++;
               }
               else
               {
                  C_diag_data[B_marker[i3]] += a_entry * B_diag_data[jj3];
               }
            }
            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B + map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   * Check B_marker to see that C_{ii1,i3} has not already
                   * been accounted for. If it has not, create a new entry.
                   * If it has, add new contribution.
                   *--------------------------------------------------------*/
                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     C_offd_data[jj_count_offd] = a_entry * B_offd_data[jj3];
                     C_offd_j[jj_count_offd] = i3 - num_cols_diag_B;
                     jj_count_offd++;
                  }
                  else
                  {
                     C_offd_data[B_marker[i3]] += a_entry * B_offd_data[jj3];
                  }
               }
            }
         }
      }

      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

   C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B, row_starts_A,
                                col_starts_B, num_cols_offd_C,
                                C_diag_size, C_offd_size);

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C, 0);
   hypre_ParCSRMatrixSetColStartsOwner(C, 0);

   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_data;
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   hypre_CSRMatrixJ(C_diag) = C_diag_j;
   hypre_CSRMatrixSetRownnz(C_diag);

   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_ParCSRMatrixOffd(C) = C_offd;
   if (num_cols_offd_C)
   {
      hypre_CSRMatrixData(C_offd) = C_offd_data;
      hypre_CSRMatrixJ(C_offd) = C_offd_j;
      hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
   }
   hypre_CSRMatrixSetRownnz(C_offd);

   hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;
   hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;

   /*-----------------------------------------------------------------------
    * Free various arrays
    *-----------------------------------------------------------------------*/
   hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
   if (B_ext_diag_size)
   {
      hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
   if (B_ext_offd_size)
   {
      hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B)
   {
      hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif

   return C;
}
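/*
 * Usage sketch (illustrative only, not part of the library): forming a
 * Galerkin-style triple product with two calls, assuming R, A, P are
 * conforming hypre_ParCSRMatrix operands:
 *
 *    hypre_ParCSRMatrix *AP  = hypre_ParMatmul(A, P);
 *    hypre_ParCSRMatrix *RAP = hypre_ParMatmul(R, AP);
 *    hypre_ParCSRMatrixDestroy(AP);
 *
 * Because each product borrows row_starts from its left operand and
 * col_starts from its right operand, the operands must outlive the product
 * (or ownership must be transferred first).
 */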
/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt
   but the code was removed so it can be used for a corresponding function
   for Boolean matrices.

   JSP: to allow communication overlapping, it returns comm_handle_idx and
   comm_handle_data. Before accessing B, they should be destroyed (including
   send_data contained in the comm_handle). */

void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   HYPRE_Int               **pB_ext_i,
   HYPRE_BigInt            **pB_ext_j,
   HYPRE_Complex           **pB_ext_data,
   HYPRE_BigInt            **pB_ext_row_map,
   HYPRE_Int                *num_nonzeros,
   HYPRE_Int                 data,
   HYPRE_Int                 find_row_map,
   MPI_Comm                  comm,
   hypre_ParCSRCommPkg      *comm_pkg,
   HYPRE_Int                 num_cols_B,
   HYPRE_Int                 num_recvs,
   HYPRE_Int                 num_sends,
   HYPRE_BigInt              first_col_diag,
   HYPRE_BigInt             *row_starts,
   HYPRE_Int                *recv_vec_starts,
   HYPRE_Int                *send_map_starts,
   HYPRE_Int                *send_map_elmts,
   HYPRE_Int                *diag_i,
   HYPRE_Int                *diag_j,
   HYPRE_Int                *offd_i,
   HYPRE_Int                *offd_j,
   HYPRE_BigInt             *col_map_offd,
   HYPRE_Real               *diag_data,
   HYPRE_Real               *offd_data,
   hypre_ParCSRCommHandle  **comm_handle_idx,
   hypre_ParCSRCommHandle  **comm_handle_data,
   HYPRE_Int                *CF_marker,
   HYPRE_Int                *CF_marker_offd,
   HYPRE_Int                 skip_fine,      /* 1 if only coarse points are needed */
   HYPRE_Int                 skip_same_sign  /* 1 if only points that have the same sign are needed */
   // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix
   // other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
   hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   HYPRE_Int *B_int_i;
   HYPRE_BigInt *B_int_j;
   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_Complex *B_int_data;
   HYPRE_BigInt *B_int_row_map;
   HYPRE_BigInt *B_ext_row_map;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int i, j, k;
   HYPRE_Int start_index;
   /*HYPRE_Int jrow;*/
   HYPRE_Int num_rows_B_ext;
   HYPRE_Int *prefix_sum_workspace;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   HYPRE_BigInt first_row_index = row_starts[0];

   num_rows_B_ext = recv_vec_starts[num_recvs];
   if (num_rows_B_ext < 0)
   {
      /* no B_ext, no communication */
      *pB_ext_i = NULL;
      *pB_ext_j = NULL;
      if (data) { *pB_ext_data = NULL; }
      if (find_row_map) { *pB_ext_row_map = NULL; }
      *num_nonzeros = 0;
      return;
   }
   B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends] + 1, HYPRE_MEMORY_HOST);
   B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext + 1, HYPRE_MEMORY_HOST);
   *pB_ext_i = B_ext_i;
   if (find_row_map)
   {
      B_int_row_map = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends] + 1, HYPRE_MEMORY_HOST);
      B_ext_row_map = hypre_CTAlloc(HYPRE_BigInt, num_rows_B_ext + 1, HYPRE_MEMORY_HOST);
      *pB_ext_row_map = B_ext_row_map;
   }

   /*--------------------------------------------------------------------------
    * generate B_int_i through adding number of row-elements of offd and diag
    * for corresponding rows. B_int_i[j+1] contains the number of elements of
    * a row j (which is determined through send_map_elmts)
    *--------------------------------------------------------------------------*/
   jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_send_map_starts[0] = B_int_i[0] = 0;

   /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1) * num_sends,
                                       HYPRE_MEMORY_HOST);

   {
      /*HYPRE_Int counts[num_sends];*/
      HYPRE_Int *counts;
      counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);

      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = 0;
         if (skip_fine && skip_same_sign)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               if (diag_data[diag_i[jrow]] >= 0)
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (offd_data[k] < 0) len++;
                  }
               }
               else
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (offd_data[k] > 0) len++;
                  }
               }

               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else if (skip_fine)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
               {
                  if (CF_marker[diag_j[k]] >= 0) len++;
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (CF_marker_offd[offd_j[k]] >= 0) len++;
               }

               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
               len += offd_i[jrow + 1] - offd_i[jrow];
               B_int_i[j + 1] = len;
               count += len;
            }
         }

         if (find_row_map)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               B_int_row_map[j] = (HYPRE_BigInt) jrow + first_row_index;
            }
         }

         counts[i] = count;
      }

      hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);

      {
         for (i = 1; i < num_sends; i++)
         {
            jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
         }

         /*--------------------------------------------------------------------------
          * initialize communication
          *--------------------------------------------------------------------------*/
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, &B_int_i[1], &(B_ext_i[1]));
         if (find_row_map)
         {
            /* scatter/gather B_int row numbers to form array of B_ext row numbers */
            row_map_comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg,
                                                               B_int_row_map, B_ext_row_map);
         }

         B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         if (data)
         {
            B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends],
                                      HYPRE_MEMORY_HOST);
         }
      }

      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = counts[i] + jdata_send_map_starts[i];

         if (data)
         {
            if (skip_same_sign && skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  /*HYPRE_Int count_begin = count;*/

                  if (diag_data[diag_i[jrow]] >= 0)
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt) diag_j[k] + first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
                        if (offd_data[k] < 0)
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
                  else
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt) diag_j[k] + first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
                        if (offd_data[k] > 0)
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt) diag_j[k] + first_col_diag;
                     B_int_data[count] = diag_data[k];
                     count++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     B_int_data[count] = offd_data[k];
                     count++;
                  }
               }
            }
         } // data
         else
         {
            if (skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
                  {
                     if (CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt) diag_j[k] + first_col_diag;
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (CF_marker_offd[offd_j[k]] >= 0)
                     {
                        B_int_j[count] = col_map_offd[offd_j[k]];
                        count++;
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt) diag_j[k] + first_col_diag;
                     count++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     count++;
                  }
               }
            }
         } // !data
      } /* for each send target */

      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   } /* omp parallel. JSP: this takes most of time in this function */

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /*--------------------------------------------------------------------------
    * after communication exchange B_ext_i[j+1] contains the number of elements
    * of a row j !
    * evaluate B_ext_i and compute *num_nonzeros for B_ext
    *--------------------------------------------------------------------------*/
   for (i = 0; i < num_recvs; i++)
   {
      for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
      {
         B_ext_i[j+1] += B_ext_i[j];
      }
   }

   *num_nonzeros = B_ext_i[num_rows_B_ext];

   *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_j = *pB_ext_j;
   if (data)
   {
      *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
      B_ext_data = *pB_ext_data;
   }

   for (i = 0; i < num_recvs; i++)
   {
      start_index = B_ext_i[recv_vec_starts[i]];
      *num_nonzeros = B_ext_i[recv_vec_starts[i+1]] - start_index;
      jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
   }

   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   *comm_handle_idx = hypre_ParCSRCommHandleCreate(21, tmp_comm_pkg, B_int_j, B_ext_j);
   if (data)
   {
      *comm_handle_data = hypre_ParCSRCommHandleCreate(1, tmp_comm_pkg, B_int_data, B_ext_data);
   }

   if (row_map_comm_handle)
   {
      hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
      row_map_comm_handle = NULL;
   }

   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
   if (find_row_map) { hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST); }

   /* end generic part */
}

void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int           **pB_ext_i,
   HYPRE_BigInt        **pB_ext_j,
   HYPRE_Complex       **pB_ext_data,
   HYPRE_BigInt        **pB_ext_row_map,
   HYPRE_Int            *num_nonzeros,
   HYPRE_Int             data,
   HYPRE_Int             find_row_map,
   MPI_Comm              comm,
   hypre_ParCSRCommPkg  *comm_pkg,
   HYPRE_Int             num_cols_B,
   HYPRE_Int             num_recvs,
   HYPRE_Int             num_sends,
   HYPRE_BigInt          first_col_diag,
   HYPRE_BigInt         *row_starts,
   HYPRE_Int            *recv_vec_starts,
   HYPRE_Int            *send_map_starts,
   HYPRE_Int            *send_map_elmts,
   HYPRE_Int            *diag_i,
   HYPRE_Int            *diag_j,
   HYPRE_Int            *offd_i,
   HYPRE_Int            *offd_j,
   HYPRE_BigInt         *col_map_offd,
   HYPRE_Real           *diag_data,
   HYPRE_Real           *offd_data )
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data, &comm_handle_idx, &comm_handle_data,
      NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *) comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *) comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}
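/*
 * Usage sketch of the _Overlap variant above (illustrative only): the caller
 * may do local work between starting and finishing the exchange, but must
 * destroy both handles (and free their send buffers) before touching the
 * received arrays, as the non-overlapping wrapper does:
 *
 *    hypre_ParCSRCommHandle *h_idx, *h_data;
 *    hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(..., &h_idx, &h_data,
 *                                                 NULL, NULL, 0, 0);
 *    // ...independent local computation here...
 *    hypre_ParCSRCommHandleDestroy(h_idx);   // completes index exchange
 *    hypre_ParCSRCommHandleDestroy(h_data);  // completes value exchange
 */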
*--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, HYPRE_Int skip_same_sign ) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B); /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/ HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_recvs; HYPRE_Int *recv_vec_starts; HYPRE_Int num_sends; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Real *diag_data = hypre_CSRMatrixData(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Real *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int num_cols_B, num_nonzeros; HYPRE_Int num_rows_B_ext; hypre_CSRMatrix *B_ext; HYPRE_Int *B_ext_i; HYPRE_BigInt *B_ext_j; HYPRE_Complex *B_ext_data; HYPRE_BigInt *idummy; /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } comm_pkg = hypre_ParCSRMatrixCommPkg(A); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); num_rows_B_ext = recv_vec_starts[num_recvs]; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy, &num_nonzeros, data, 0, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, B->row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, comm_handle_idx, comm_handle_data, CF_marker, CF_marker_offd, skip_fine, skip_same_sign ); B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros); hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_ext) = B_ext_i; hypre_CSRMatrixBigJ(B_ext) = B_ext_j; if (data) hypre_CSRMatrixData(B_ext) = B_ext_data; return B_ext; } hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { #if 0 hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx, HYPRE_MEMORY_HOST); if (want_data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data, HYPRE_MEMORY_HOST); } #else hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); hypre_CSRMatrix *B_ext; void *request; if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ParcsrGetExternalRowsInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, &request); B_ext = hypre_ParcsrGetExternalRowsWait(request); #endif return B_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_ParCSRCommHandle *comm_handle; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A); HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, num_recvs, num_cols_offd_AT; HYPRE_Int i, j, k, index, counter, j_row; HYPRE_BigInt value; hypre_ParCSRMatrix *AT; hypre_CSRMatrix *AT_diag; hypre_CSRMatrix *AT_offd; hypre_CSRMatrix *AT_tmp; HYPRE_BigInt first_row_index_AT, first_col_diag_AT; HYPRE_Int local_num_rows_AT, local_num_cols_AT; HYPRE_Int *AT_tmp_i; HYPRE_Int *AT_tmp_j; HYPRE_BigInt *AT_big_j = NULL; HYPRE_Complex *AT_tmp_data; HYPRE_Int *AT_buf_i; HYPRE_BigInt *AT_buf_j; HYPRE_Complex *AT_buf_data; HYPRE_Int *AT_offd_i; HYPRE_Int *AT_offd_j; HYPRE_Complex *AT_offd_data; HYPRE_BigInt *col_map_offd_AT; HYPRE_BigInt *row_starts_AT; HYPRE_BigInt *col_starts_AT; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *recv_vec_starts; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int *tmp_recv_vec_starts; HYPRE_Int *tmp_send_map_starts; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_cols_offd_AT = 0; counter = 0; AT_offd_j = NULL; AT_offd_data = NULL; col_map_offd_AT = NULL; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data); AT_tmp_i = hypre_CSRMatrixI(AT_tmp); AT_tmp_j = hypre_CSRMatrixJ(AT_tmp); if (data) { AT_tmp_data = hypre_CSRMatrixData(AT_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (AT_tmp_i[num_cols_offd]) { AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST); } 
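
      /* shift the local column indices of A_offd^T (which are local row
         numbers of A) by first_row_index to get global indices in AT_big_j,
         and turn AT_tmp_i from offsets into per-row counts; comm type 12
         then moves these HYPRE_Int counts in the reverse direction of
         comm_pkg, from the recv side to the send side */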
for (i = 0; i < AT_tmp_i[num_cols_offd]; i++) { //AT_tmp_j[i] += first_row_index; AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index; } for (i = 0; i < num_cols_offd; i++) { AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i]; } comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i); } hypre_CSRMatrixTranspose(A_diag, &AT_diag, data); AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = send_map_starts[0]; for (i = 0; i < num_sends; i++) { tmp_send_map_starts[i+1] = tmp_send_map_starts[i]; for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { tmp_send_map_starts[i+1] += AT_buf_i[j]; AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j]; } } for (i = 0; i < num_cols; i++) { AT_offd_i[i+1] += AT_offd_i[i]; } tmp_recv_vec_starts[0] = recv_vec_starts[0]; for (i = 0; i < num_recvs; i++) { tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i]; for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { tmp_recv_vec_starts[i+1] += AT_tmp_i[j]; } } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); if (data) { AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(AT_tmp); if (AT_offd_i[num_cols]) { AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location); AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST); if (data) { AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location); } } else { AT_offd_j = NULL; AT_offd_data = NULL; } counter = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { j_row = send_map_elmts[j]; index = AT_offd_i[j_row]; for (k = 0; k < AT_buf_i[j]; k++) { if (data) { AT_offd_data[index] = AT_buf_data[counter]; } AT_big_j[index++] = AT_buf_j[counter++]; } AT_offd_i[j_row] = index; } } for (i = num_cols; i > 0; i--) { AT_offd_i[i] = AT_offd_i[i-1]; } AT_offd_i[0] = 0; if (counter) { hypre_BigQsort0(AT_buf_j,0,counter-1); num_cols_offd_AT = 1; value = AT_buf_j[0]; for (i = 1; i < counter; i++) { if (value < AT_buf_j[i]) { AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i]; value = AT_buf_j[i]; } } } if (num_cols_offd_AT) { col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST); } else { 
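         /* A^T has no nonlocal columns on this rank, so the off-diagonal
            column map stays empty */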
col_map_offd_AT = NULL; } for (i = 0; i < num_cols_offd_AT; i++) { col_map_offd_AT[i] = AT_buf_j[i]; } hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST); hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST); if (data) { hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < counter; i++) { AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT); } hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); } AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter); hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location; hypre_CSRMatrixI(AT_offd) = AT_offd_i; hypre_CSRMatrixJ(AT_offd) = AT_offd_j; hypre_CSRMatrixData(AT_offd) = AT_offd_data; row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { row_starts_AT[i] = col_starts[i]; } if (row_starts != col_starts) { col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { col_starts_AT[i] = row_starts[i]; } } else { col_starts_AT = row_starts_AT; } first_row_index_AT = row_starts_AT[0]; first_col_diag_AT = col_starts_AT[0]; local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT ); local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT); AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(AT) = comm; hypre_ParCSRMatrixDiag(AT) = AT_diag; hypre_ParCSRMatrixOffd(AT) = AT_offd; hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT; hypre_ParCSRMatrixColStarts(AT) = col_starts_AT; hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT; hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT; hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1; hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1; hypre_ParCSRMatrixOwnsData(AT) = 1; hypre_ParCSRMatrixOwnsRowStarts(AT) = 1; hypre_ParCSRMatrixOwnsColStarts(AT) = 1; if (row_starts_AT == col_starts_AT) { hypre_ParCSRMatrixOwnsColStarts(AT) = 0; } hypre_ParCSRMatrixCommPkg(AT) = NULL; hypre_ParCSRMatrixCommPkgT(AT) = NULL; hypre_ParCSRMatrixRowindices(AT) = NULL; hypre_ParCSRMatrixRowvalues(AT) = NULL; hypre_ParCSRMatrixGetrowactive(AT) = 0; hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1; *AT_ptr = AT; return ierr; } /* ----------------------------------------------------------------------------- * generate a parallel spanning tree (for Maxwell Equation) * G_csr is the node to edge connectivity matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_BigInt nrows_G, ncols_G; HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = 
hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) counts[i] = 0; for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++; G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST); G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; } hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1; for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i; else GT_diag_mat[edge*2+1] = i; } } /* BFS on the local matrix graph to find tree */ nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0; for (i = 0; i < ncols_G; i++) edges_marked[i] = 0; queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail-queue_head) > 0) { node = queue[queue_tail-1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2*edge+1] != -1) { node2 = GT_diag_mat[2*edge]; if (node2 == node) node2 = GT_diag_mat[2*edge+1]; if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends+nrecvs) > 0) { n_proc_array = 0; proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST); for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i]; for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i]; hypre_qsort0(proc_array, 0, nsends+nrecvs-1); n_proc_array = 1; for (i = 1; i < nrecvs+nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) proc_array[n_proc_array++] = proc_array[i]; } pgraph_i = hypre_TAlloc(HYPRE_Int, 
(nprocs+1) , HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. 
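 * (one tree edge per child is enough, hence the break below)           *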
But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) { hypre_TFree(children, HYPRE_MEMORY_HOST); } /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; hypre_TFree(edges_marked, HYPRE_MEMORY_HOST); if (G_type != 0) { hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) { proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; } /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) 
exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; /* This case is not yet implemented! */ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; 
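   /* columns of A12 live in the complement set: the fill loop above decodes
      exp_indices[col] = -k-1 back to local column k */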
hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* 
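   In the sequential case handled here, every block has an empty
   off-diagonal part.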
----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ 
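
   /* two passes: count the nonzeros of each block first so the CSR arrays
      can be allocated exactly, then fill them below; diag entries split by
      the sign of exp_indices[col], while each offd row is copied whole */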
nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) nnz21++; } nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = 
            A_diag_j[j];
            diag_a[nnz++] = A_diag_a[j];
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag)    = diag_i;
   hypre_CSRMatrixJ(diag)    = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            /* NB: as in the original source, the values are read from
               A_diag_a here; A's off-diagonal value array is never fetched
               in this routine */
            offd_a[nnz++] = A_diag_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd)    = offd_i;
   hypre_CSRMatrixJ(offd)    = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;

   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */
   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A21_csr;
   hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
   hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
   hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the matrix
 * ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag( A );
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd( A );

   return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixAminvDB
 * computes C = A - inv(D)*B, where D is a diagonal matrix
 * Note: the data structure of A is expected to be a subset of the data
 * structure of B!
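 *
 * Row-wise this is C(i,:) = A(i,:) - (1/d[i]) * B(i,:). Below is a minimal
 * dense sketch of that row update (our own helper, not a hypre API); the
 * actual routine performs the same merge on the sparse diag/offd parts.
 *--------------------------------------------------------------------------*/

/* example_aminvdb_row: illustrative only; computes one dense row of
   C = A - inv(D)*B, mirroring the D_tmp[i] = 1.0/d[i] scaling used in the
   routine below. */
static void example_aminvdb_row(const HYPRE_Complex *a_row,
                                const HYPRE_Complex *b_row,
                                HYPRE_Complex d_i,
                                HYPRE_Complex *c_row,
                                HYPRE_Int n)
{
   HYPRE_Int j;
   HYPRE_Complex d_inv = 1.0 / d_i;

   for (j = 0; j < n; j++)
   {
      c_row[j] = a_row[j] - d_inv * b_row[j];
   }
}

/*--------------------------------------------------------------------------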
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_ParCSRMatrix *C = NULL; HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_sends_B, num_recvs_B; HYPRE_Int i, j, cnt; HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_offd = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs_B; HYPRE_Int *send_procs_B; HYPRE_Int *recv_vec_starts_B; HYPRE_Int *send_map_starts_B; HYPRE_Int *send_map_elmts_B; hypre_ParCSRCommPkg *comm_pkg_C; HYPRE_Int *recv_procs_C; HYPRE_Int *send_procs_C; HYPRE_Int *recv_vec_starts_C; HYPRE_Int *send_map_starts_C; HYPRE_Int *send_map_elmts_C; HYPRE_Int *map_to_B; /*HYPRE_Int *C_diag_array; HYPRE_Int *C_offd_array;*/ HYPRE_Complex *D_tmp; HYPRE_Int size, rest, num_threads, ii; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for B, a CommPkg is generated *--------------------------------------------------------------------*/ if (!comm_pkg_B) { hypre_MatvecCommPkgCreate(B); comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); } C = hypre_ParCSRMatrixClone(B, 0); /*hypre_ParCSRMatrixInitialize(C);*/ C_diag = hypre_ParCSRMatrixDiag(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); size = num_rows/num_threads; rest = num_rows - size*num_threads; D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_A; i++) { while (col_map_offd_B[cnt] < col_map_offd_A[i]) { cnt++; } map_to_B[i] = cnt; cnt++; } } for (ii=0; ii < 
num_threads; ii++) { HYPRE_Int *A_marker = NULL; HYPRE_Int ns, ne, A_col, num_cols, nmax; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } nmax = hypre_max(num_rows, num_cols_offd_B); A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) { A_marker[i] = -1; } for (i = ns; i < ne; i++) { D_tmp[i] = 1.0/d[i]; } num_cols = C_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_col = A_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = A_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] += A_diag_data[j]; } } for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++) { A_col = B_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j]; } } } for (i = 0; i < num_cols_offd_B; i++) { A_marker[i] = -1; } num_cols = C_offd_i[ns]; for (i = ns; i < ne; i++) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_col = map_to_B[A_offd_j[j]]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = A_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] += A_offd_data[j]; } } for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++) { A_col = B_offd_j[j]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j]; } } } hypre_TFree(A_marker, HYPRE_MEMORY_HOST); } /* end parallel region */ /*for (i=0; i < num_cols_offd_B; i++) col_map_offd_C[i] = col_map_offd_B[i]; */ num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B); num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B); recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B); recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B); send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B); send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B); send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B); recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST); recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST); send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST); send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST); send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST); for (i=0; i < num_recvs_B; i++) recv_procs_C[i] = recv_procs_B[i]; for (i=0; i < num_recvs_B+1; i++) recv_vec_starts_C[i] = recv_vec_starts_B[i]; for (i=0; i < num_sends_B; i++) send_procs_C[i] = send_procs_B[i]; for (i=0; i < num_sends_B+1; i++) send_map_starts_C[i] = send_map_starts_B[i]; for (i=0; i < send_map_starts_B[num_sends_B]; i++) send_map_elmts_C[i] = send_map_elmts_B[i]; comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_C) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B; hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C; hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B; hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = 
send_map_starts_C; hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C; hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C; hypre_TFree(D_tmp, HYPRE_MEMORY_HOST); if (num_cols_offd_A) hypre_TFree(map_to_B, HYPRE_MEMORY_HOST); *C_ptr = C; return (hypre_error_flag); } /*-------------------------------------------------------------------------- * hypre_ParTMatmul: * * Multiplies two ParCSRMatrices transpose(A) and B and returns * the product in ParCSRMatrix C * * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_BigInt first_col_diag_C; HYPRE_BigInt last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_BigInt *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_BigInt *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_BigInt nrows_A, ncols_A; HYPRE_BigInt nrows_B, ncols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_BigInt value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_BigInt first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = hypre_NumThreads(); if (nrows_A != nrows_B || 
num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; void *request; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request); C_ext = hypre_ExchangeExternalRowsWait(request); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixBigJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0; } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { temp[C_ext_offd_size++] = C_ext_j[j]; } else { C_ext_diag_size++; } } C_ext_diag_i[i+1] = C_ext_diag_size; C_ext_offd_i[i+1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { hypre_BigQsort0(temp,0,cnt-1); value = temp[0]; num_cols_offd_C = 1; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if 
(num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST); } C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C); C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute: * C_diag = C_tmp_diag + C_ext_diag * C_offd = C_tmp_offd + C_ext_offd * * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A/num_threads; rest = num_cols_diag_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) 
{ jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } } } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads-1; ik++) { C_diag_array[ik+1] += C_diag_array[ik]; C_offd_array[ik+1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads-1]; nnz_o = C_offd_array[num_threads-1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); } /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * Now fill in values *-----------------------------------------------------------------------*/ for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } /*----------------------------------------------------------------------- * Populate matrices *-----------------------------------------------------------------------*/ nnz_d = 0; nnz_o = 0; if (ii) { nnz_d = C_diag_array[ii-1]; nnz_o = C_offd_array[ii-1]; } for (ik = ns; ik < ne; ik++) { C_diag_i[ik] = nnz_d; C_offd_i[ik] = nnz_o; for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_tmp_diag_data[jk]; B_marker[jcol] = nnz_d; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_tmp_offd_data[jk]; B_marker_offd[jcol] = nnz_o; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < C_diag_i[ik]) { C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_ext_diag_data[j2]; B_marker[jcol] = nnz_d; nnz_d++; } else { C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2]; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < C_offd_i[ik]) { C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_ext_offd_data[j2]; B_marker_offd[jcol] = nnz_o; nnz_o++; } else { C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2]; } } break; } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST); } /*C = hypre_ParCSRMatrixCreate(comm, ncols_A, ncols_B, col_starts_A, col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */ /* row_starts[0] is start of local rows. 
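      (e.g. with two ranks owning 3 and 2 rows, rank 0 stores [0,3) and
      rank 1 stores [3,5));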
row_starts[1] is start of next processor's rows */ first_row_index = col_starts_A[0]; local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index ); first_col_diag = col_starts_B[0]; local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = comm; hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A; hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B; hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index; hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(C) = NULL; hypre_ParCSRMatrixAssumedPartition(C) = NULL; hypre_ParCSRMatrixRowStarts(C) = col_starts_A; hypre_ParCSRMatrixColStarts(C) = col_starts_B; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(C) = 1; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C,0); hypre_ParCSRMatrixSetColStartsOwner(C,0); if (C_diag) { hypre_CSRMatrixSetRownnz(C_diag); hypre_ParCSRMatrixDiag(C) = C_diag; } else { hypre_ParCSRMatrixDiag(C) = C_tmp_diag; } if (C_offd) { hypre_CSRMatrixSetRownnz(C_offd); hypre_ParCSRMatrixOffd(C) = C_offd; } else { hypre_ParCSRMatrixOffd(C) = C_tmp_offd; } hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C; if (num_cols_offd_C) { HYPRE_Int jj_count_offd, nnz_offd; HYPRE_BigInt *new_col_map_offd_C = NULL; P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd_C; i++) { P_marker[i] = -1; } jj_count_offd = 0; nnz_offd = C_offd_i[num_cols_diag_A]; for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; if (P_marker[i1]) { P_marker[i1] = 0; jj_count_offd++; } } if (jj_count_offd < num_cols_offd_C) { new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_count_offd = 0; for (i = 0; i < num_cols_offd_C; i++) { if (!P_marker[i]) { P_marker[i] = jj_count_offd; new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i]; } } for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; C_offd_j[i] = P_marker[i1]; } num_cols_offd_C = jj_count_offd; hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST); col_map_offd_C = new_col_map_offd_C; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST); } if (C_ext_diag_size) { hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } if (C_diag) { hypre_CSRMatrixDestroy(C_tmp_diag); } if (C_offd) { hypre_CSRMatrixDestroy(C_tmp_offd); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if ( 
hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE ) { hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C)); hypre_SyncCudaComputeStream(hypre_handle()); } #endif return C; } HYPRE_Int hypre_ParvecBdiagInvScal( hypre_ParVector *b, HYPRE_Int blockSize, hypre_ParVector **bs, hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(b); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, s, block_start, block_end; HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b); HYPRE_BigInt first_row = hypre_ParVectorFirstIndex(b); HYPRE_BigInt last_row = hypre_ParVectorLastIndex(b); HYPRE_BigInt end_row = last_row + 1; /* one past-the-last */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global ); hypre_assert(blockSize == A->bdiag_size); HYPRE_Complex *bdiaginv = A->bdiaginv; hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg; HYPRE_Complex *dense = bdiaginv; //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); /* local vector of b */ hypre_Vector *b_local = hypre_ParVectorLocalVector(b); HYPRE_Complex *b_local_data = hypre_VectorData(b_local); /* number of sends (#procs) */ HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); hypre_ParCSRCommHandle *comm_handle; HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(part, hypre_ParVectorPartitioning(b), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), part ); hypre_ParVectorInitialize(bnew); hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew); HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local); /* send and recv b */ HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST); HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows_send; i++) { j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_b[i] = b_local_data[j]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b); /* ... 
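the exchange of send_b into recv_b proceeds asynchronously here; the Destroy call below waits for it to complete before recv_b is read in the block loop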
*/ hypre_ParCSRCommHandleDestroy(comm_handle); for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); for (big_i = block_start; big_i < block_end; big_i++) { if (big_i < first_row || big_i >= end_row) { continue; } HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); bnew_local_data[local_i] = 0.0; for (j = 0; j < s; j++) { HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); bnew_local_data[local_i] += val * b_local_data[rid]; } else { HYPRE_Int rid; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } bnew_local_data[local_i] += val * recv_b[rid]; } } } dense += blockSize * blockSize; } hypre_TFree(send_b, HYPRE_MEMORY_HOST); hypre_TFree(recv_b, HYPRE_MEMORY_HOST); *bs = bnew; return hypre_error_flag; } /** * @brief Compute As = B^{-1}*A, where B is the block diagonal of A * @param[in] A : input ParCSR matrix (its block-diagonal inverse is also cached in A->bdiaginv) * @param[in] blockSize: block size * @param[out] As : output ParCSR matrix As = B^{-1}*A * @return hypre_error_flag */ HYPRE_Int hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A, HYPRE_Int blockSize, hypre_ParCSRMatrix **As) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, k, s; HYPRE_BigInt block_start, block_end; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A); HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */ HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */ HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local; HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); void *request; /* if square globally and locally */ HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) && (first_row == first_col); if (nrow_global != ncol_global) { hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n"); return hypre_error_flag; } /* in block diagonals, row range of the blocks this proc spans */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
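/* first_row_block/end_row_block give the block-aligned cover of this rank's rows: first_row is rounded down and last_row+1 rounded up to a multiple of blockSize, clipped at nrow_global. Illustrative example (made-up numbers): with blockSize = 4 and local rows [6, 14), the cover is [4, 16), so rows 4-5 and 14-15 live on neighboring ranks and must be fetched below. */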
HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize); //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); //return 0; /* number of external rows */ HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row)); HYPRE_BigInt *ext_indices; HYPRE_Int A_ext_nnz; hypre_CSRMatrix *A_ext = NULL; HYPRE_Complex *A_ext_a = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST); HYPRE_Real *dense = dense_all; HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST); HYPRE_Complex *dgetri_work = NULL; HYPRE_Int dgetri_lwork = -1, lapack_info; HYPRE_Int num_cols_A_offd_new; HYPRE_BigInt *col_map_offd_A_new; HYPRE_BigInt big_i; HYPRE_Int *offd2new = NULL; HYPRE_Int *marker_diag, *marker_newoffd; HYPRE_Int nnz_diag = A_diag_i[nrow_local]; HYPRE_Int nnz_offd = A_offd_i[nrow_local]; HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0; HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new; HYPRE_Complex *A_diag_a_new, *A_offd_a_new; /* heuristic */ HYPRE_Int nnz_diag_alloc = 2 * nnz_diag; HYPRE_Int nnz_offd_alloc = 2 * nnz_offd; A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *Anew; hypre_CSRMatrix *Anew_diag; hypre_CSRMatrix *Anew_offd; HYPRE_BigInt *row_starts_new, *col_starts_new; HYPRE_Real eps = 2.2e-16; /* Start by extracting the external rows */ HYPRE_BigInt *ext_offd; ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST); j = 0; for (big_i = first_row_block; big_i < first_row; big_i++) { ext_indices[j++] = big_i; } for (big_i = end_row; big_i < end_row_block; big_i++) { ext_indices[j++] = big_i; } hypre_assert(j == num_ext_rows); /* create CommPkg for external rows */ hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts, hypre_ParCSRMatrixAssumedPartition(A), num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg); hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request); A_ext = hypre_ParcsrGetExternalRowsWait(request); hypre_TFree(ext_indices, HYPRE_MEMORY_HOST); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_a = hypre_CSRMatrixData(A_ext); A_ext_nnz = A_ext_i[num_ext_rows]; ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST); /* find the offd indices in A_ext */ for (i = 0, j = 0; i < A_ext_nnz; i++) { /* global index */ HYPRE_BigInt cid = A_ext_j[i]; /* keep the offd indices */ if (cid < first_col || cid >= end_col) { ext_offd[j++] = cid; } } /* remove duplicates after sorting (TODO better ways?)
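; hypre_BigQsort0 sorts the j collected offd indices in place and the scan that follows keeps only unique entries, leaving k of them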
*/ hypre_BigQsort0(ext_offd, 0, j-1); for (i = 0, k = 0; i < j; i++) { if (i == 0 || ext_offd[i] != ext_offd[i-1]) { ext_offd[k++] = ext_offd[i]; } } /* union these `k' new indices into col_map_offd_A */ col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST); if (k) { /* map offd to offd_new */ offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd, &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL); hypre_TFree(ext_offd, HYPRE_MEMORY_HOST); /* * adjust column indices in A_ext */ for (i = 0; i < A_ext_nnz; i++) { HYPRE_BigInt cid = A_ext_j[i]; if (cid < first_col || cid >= end_col) { j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new); /* searching must succeed */ hypre_assert(j >= 0 && j < num_cols_A_offd_new); /* trick: save ncol_local + j back */ A_ext_j[i] = ncol_local + j; } else { /* save local index: [0, ncol_local-1] */ A_ext_j[i] = cid - first_col; } } /* marker for diag */ marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST); for (i = 0; i < ncol_local; i++) { marker_diag[i] = -1; } /* marker for newoffd */ marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } /* outermost loop over blocks */ for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); /* 1. fill the dense block diag matrix */ for (big_i = block_start; big_i < block_end; big_i++) { /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); /* row index big_i: it can be local or external */ if (big_i >= first_row && big_i < end_row) { /* is a local row */ j = (HYPRE_Int)(big_i - first_row); for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++) { HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k]; } } if (num_cols_A_offd) { for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++) { HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k]; } } } } else { /* is an external row */ if (big_i < first_row) { j = (HYPRE_Int)(big_i - first_row_block); } else { j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row); } for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++) { HYPRE_BigInt cid = A_ext_j[k]; /* recover the global index */ cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k]; } } } } /* 2.
invert the dense matrix */ hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info); hypre_assert(lapack_info == 0); if (lapack_info == 0) { HYPRE_Int query = -1; HYPRE_Real lwork_opt; /* query the optimal size of work */ hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info); hypre_assert(lapack_info == 0); if (lwork_opt > dgetri_lwork) { dgetri_lwork = lwork_opt; dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST); } hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info); hypre_assert(lapack_info == 0); } /* filter out *zeros* */ HYPRE_Real Fnorm = 0.0; for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { HYPRE_Complex t = dense[j+i*blockSize]; Fnorm += t * t; } } Fnorm = sqrt(Fnorm); for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm ) { dense[j+i*blockSize] = 0.0; } } } /* 3. premultiplication: one-pass dynamic allocation */ for (big_i = block_start; big_i < block_end; big_i++) { /* starting points of this row in j */ HYPRE_Int diag_i_start = nnz_diag_new; HYPRE_Int offd_i_start = nnz_offd_new; /* compute a new row with global index 'big_i' and local index 'local_i' */ HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); if (big_i < first_row || big_i >= end_row) { continue; } /* if square (square2): reserve the first space in the diag part for the diag entry */ if (square2) { marker_diag[local_i] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = local_i; A_diag_a_new[nnz_diag_new] = 0.0; nnz_diag_new ++; } /* combine s rows */ for (j = 0; j < s; j++) { /* row to combine: global row id */ HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; /* the multiplier */ HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { /* this row is local */ HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); HYPRE_Int ii; for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++) { HYPRE_Int col = A_diag_j[ii]; HYPRE_Complex vv = A_diag_a[ii]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++) { HYPRE_Int col = A_offd_j[ii]; /* use the mapper to map to new offd */ HYPRE_Int col_new = offd2new ?
offd2new[col] : col; HYPRE_Complex vv = A_offd_a[ii]; if (marker_newoffd[col_new] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col_new] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col_new; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col_new]; hypre_assert(A_offd_j_new[p] == col_new); A_offd_a_new[p] += val * vv; } } } else { /* this is an external row: go to A_ext */ HYPRE_Int rid, ii; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++) { HYPRE_Int col = (HYPRE_Int)A_ext_j[ii]; HYPRE_Complex vv = A_ext_a[ii]; if (col < ncol_local) { /* in diag part */ if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } else { /* in offd part */ col -= ncol_local; if (marker_newoffd[col] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col]; hypre_assert(A_offd_j_new[p] == col); A_offd_a_new[p] += val * vv; } } } } } /* done for row local_i */ A_diag_i_new[local_i + 1] = nnz_diag_new; A_offd_i_new[local_i + 1] = nnz_offd_new; } /* for i, each row */ dense += blockSize * blockSize; } /* for each block */ /* done with all rows */ /* resize properly */ A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST); /* readjust col_map_offd_new */ for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } for (i = 0; i < nnz_offd_new; i++) { j = A_offd_j_new[i]; if (marker_newoffd[j] == -1) { marker_newoffd[j] = 1; } } for (i = 0, j = 0; i < num_cols_A_offd_new; i++) { if (marker_newoffd[i] == 1) { col_map_offd_A_new[j] = col_map_offd_A_new[i]; marker_newoffd[i] = j++; } } num_cols_A_offd_new = j; for (i = 0; i < nnz_offd_new; i++) { j = marker_newoffd[A_offd_j_new[i]]; hypre_assert(j >= 0 && j < num_cols_A_offd_new); 
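/* rewrite the column index in the compacted offd numbering */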
A_offd_j_new[i] = j; } /* partitioning arrays have length 2 */ row_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); /* Now we should have everything for the ParCSR matrix As */ Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global, row_starts_new, col_starts_new, num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; hypre_ParCSRMatrixSetNumNonzeros(Anew); hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew); //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new); /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } */ /* save diagonal blocks in A */ A->bdiag_size = blockSize; A->bdiaginv = dense_all; /* free workspace */ hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } HYPRE_Int hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts, *send_i_offset; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a = NULL, *recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /*
number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contain proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); /* fill the send array with row lengths */ for (i = 0, num_nnz_send = 0; i < num_rows_send; i++) { /* j: row index to send */ j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j]; num_nnz_send += send_i[i]; } /* send this array out: note the shift in recv_i by one (async) */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1); /* prepare data to send out; overlap with the above communication */ send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST); if (want_data) { send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST); } send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST); send_i_offset[0] = 0; hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); /* prefix sum. TODO: OMP parallelization */ for (i = 1; i <= num_rows_send; i++) { send_i_offset[i] += send_i_offset[i-1]; } hypre_assert(send_i_offset[num_rows_send] == num_nnz_send); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_sends; i++) { send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)]; } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* fill the CSR matrix: j and a */ for (i = 0; i < num_rows_send; i++) { HYPRE_Int i1 = send_i_offset[i]; j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); /* open row j and fill ja and a to send */ for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++) { send_j[i1] = first_col + A_diag_j[k]; if (want_data) { send_a[i1] = A_diag_a[k]; } i1++; } if (num_procs > 1) { for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++) { send_j[i1] = col_map_offd_A[A_offd_j[k]]; if (want_data) { send_a[i1] = A_offd_a[k]; } i1++; } } hypre_assert(send_i_offset[i+1] == i1); } /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i-1]; } num_nnz_recv = recv_i[num_rows_recv]; recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST); if (want_data) { recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST); } recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init
communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a); } else { comm_handle_a = NULL; } /* create A_ext */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI (A_ext) = recv_i; hypre_CSRMatrixBigJ(A_ext) = recv_j; hypre_CSRMatrixData(A_ext) = recv_a; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j); if (comm_handle_a) { HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_a, HYPRE_MEMORY_HOST); } hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(send_j, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixAdd: performs C = alpha*A + beta*B * * A and B are assumed to have the same row and column partitionings *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { /* ParCSRMatrix data */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *rownnz_diag_A = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd; /* diag part of B */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag); HYPRE_Int num_rownnz_diag_B = 
hypre_CSRMatrixNumRownnz(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); /* off-diag part of B */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd); HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd); HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int *B2C_offd; /* C data */ hypre_ParCSRMatrix *C; HYPRE_BigInt *row_starts_C; HYPRE_BigInt *col_starts_C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *C_diag_i, *C_offd_i; HYPRE_Int *rownnz_diag_C = NULL; HYPRE_Int *rownnz_offd_C = NULL; HYPRE_Int num_rownnz_diag_C; HYPRE_Int num_rownnz_offd_C; HYPRE_Int num_rows_diag_C = num_rows_diag_A; HYPRE_Int num_cols_diag_C = num_cols_diag_A; HYPRE_Int num_rows_offd_C = num_rows_offd_A; HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); HYPRE_ANNOTATE_FUNC_BEGIN; hypre_assert(num_rows_A == num_rows_B); hypre_assert(num_cols_A == num_cols_B); hypre_assert(num_rows_diag_A == num_rows_diag_B); hypre_assert(num_cols_diag_A == num_cols_diag_B); /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C); col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); /* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/ A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_union2(num_cols_offd_A, col_map_offd_A, num_cols_offd_B, col_map_offd_B, &num_cols_offd_C, col_map_offd_C, A2C_offd, B2C_offd); /* Set nonzero rows data of diag_C */ num_rownnz_diag_C = num_rows_diag_A; if ((num_rownnz_diag_A < num_rows_diag_A) && (num_rownnz_diag_B < num_rows_diag_B)) { hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A, num_rownnz_diag_B, rownnz_diag_B, &num_rownnz_diag_C, &rownnz_diag_C); } /* Set nonzero rows data of offd_C */ num_rownnz_offd_C = num_rows_offd_A; if ((num_rownnz_offd_A < num_rows_offd_A) && (num_rownnz_offd_B < num_rows_offd_B)) { hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A, num_rownnz_offd_B, rownnz_offd_B, &num_rownnz_offd_C, &rownnz_offd_C); } /* Set diag_C */ { HYPRE_Int ii, num_threads; HYPRE_Int size, rest, ns, ne; HYPRE_Int *marker_diag; HYPRE_Int *marker_offd; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /*----------------------------------------------------------------------- * Compute C_diag = alpha*A_diag + beta*B_diag *-----------------------------------------------------------------------*/ size = num_rownnz_diag_C/num_threads; rest = num_rownnz_diag_C - size*num_threads; if 
(ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_diag, NULL, NULL, A_diag, B_diag, num_rows_diag_C, num_rownnz_diag_C, num_cols_diag_C, rownnz_diag_C, memory_location_C, C_diag_i, &C_diag); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_diag, NULL, NULL, rownnz_diag_C, alpha, beta, A_diag, B_diag, C_diag); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Compute C_offd = alpha*A_offd + beta*B_offd *-----------------------------------------------------------------------*/ size = num_rownnz_offd_C/num_threads; rest = num_rownnz_offd_C - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, A_offd, B_offd, num_rows_offd_C, num_rownnz_offd_C, num_cols_offd_C, rownnz_offd_C, memory_location_C, C_offd_i, &C_offd); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, rownnz_offd_C, alpha, beta, A_offd, B_offd, C_offd); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); } /* end of omp parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST); hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST); /* Create ParCSRMatrix C */ row_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); C = hypre_ParCSRMatrixCreate(comm, num_rows_A, num_cols_A, row_starts_C, col_starts_C, num_cols_offd_C, hypre_CSRMatrixNumNonzeros(C_diag), hypre_CSRMatrixNumNonzeros(C_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); hypre_ParCSRMatrixDiag(C) = C_diag; hypre_ParCSRMatrixOffd(C) = C_offd; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *C_ptr = C; HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFnorm *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Real f_diag, f_offd, local_result, result; f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A)); f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A)); local_result = f_diag * f_diag + f_offd * f_offd; hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); return sqrt(result); } /*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsInit *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, void **request_ptr) { MPI_Comm comm = 
hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i = B_ext ? hypre_CSRMatrixI(B_ext) : NULL; HYPRE_BigInt *B_ext_j = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL; HYPRE_Complex *B_ext_data = B_ext ? hypre_CSRMatrixData(B_ext) : NULL; HYPRE_Int B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0; HYPRE_Int B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0; HYPRE_Int *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST); hypre_assert(num_elmts_recv == B_ext_nrows); /* output matrix */ hypre_CSRMatrix *B_int; HYPRE_Int B_int_nrows = num_elmts_send; HYPRE_Int B_int_ncols = B_ext_ncols; HYPRE_Int *B_int_i = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST); HYPRE_BigInt *B_int_j = NULL; HYPRE_Complex *B_int_data = NULL; HYPRE_Int B_int_nnz; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; hypre_ParCSRCommPkg *comm_pkg_j; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i; HYPRE_Int num_procs; void **vrequest; hypre_MPI_Comm_size(comm, &num_procs); jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * B_ext_rownnz contains the number of elements of row j * (to be determined through send_map_elmts on the receiving end) *--------------------------------------------------------------------------*/ for (i = 0; i < B_ext_nrows; i++) { B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i]; } /*-------------------------------------------------------------------------- * initialize communication: send/recv the row nnz * (note the use of comm_pkg_A, mode 12, as in a transpose matvec) *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1); jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts[0] = 0; for (i = 1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i[i] += B_int_i[i-1]; } B_int_nnz = B_int_i[B_int_nrows]; B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST); B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i[send_map_starts[i]]; } /* note the order of send/recv is reversed */
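/* B_int travels opposite to a forward matvec: rows that comm_pkg_A receives are sent back, so comm_pkg_j swapped the send/recv procs above and swaps the jdata starts arrays below */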
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data); comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j); /* create CSR */ B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_int) = B_int_i; hypre_CSRMatrixBigJ(B_int) = B_int_j; hypre_CSRMatrixData(B_int) = B_int_data; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsWait *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_ExchangeExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractSubmatrixFC * * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC} * char job[2] = "FF", "FC", "CF" or "CC" *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts_in, const char *job, hypre_ParCSRMatrix **B_ptr, HYPRE_Real strength_thresh) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag, *B_offd; HYPRE_Real *B_maxel_row; HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j; HYPRE_Complex *B_diag_a, *B_offd_a; HYPRE_Int num_cols_B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int i, j, k, k1, k2; HYPRE_BigInt B_nrow_global, B_ncol_global; HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd; HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts; HYPRE_Int nf_local, nc_local; HYPRE_Int 
row_set, col_set; HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col; HYPRE_Int my_id, num_procs, *sub_idx_diag, *sub_idx_offd; HYPRE_Int num_sends, *send_buf_data; /* MPI size and rank */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); row_set = job[0] == 'F' ? -1 : 1; col_set = job[1] == 'F' ? -1 : 1; A_nlocal = hypre_CSRMatrixNumRows(A_diag); /*-------------- global number of C points and local C points * assuming cpts_starts is given */ if (row_set == 1 || col_set == 1) { /* copy cpts_starts first */ HYPRE_Int len; len = 2; cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_TMemcpy(cpts_starts, cpts_starts_in, HYPRE_BigInt, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); if (my_id == (num_procs -1)) { total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]); } /*-------------- global number of F points, local F points, and F starts */ if (row_set == -1 || col_set == -1) { nf_local = 0; for (i = 0; i < A_nlocal; i++) { if (CF_marker[i] < 0) { nf_local++; } } fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); HYPRE_BigInt big_nf_local = (HYPRE_BigInt) nf_local; /* BigInt copy so the buffer matches HYPRE_MPI_BIG_INT */ hypre_MPI_Scan(&big_nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - nf_local; if (my_id == num_procs - 1) { total_global_fpts = fpts_starts[1]; } hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); } if (row_set == -1 && col_set == -1) { /* FF */ B_nrow_local = nf_local; B_ncol_local = nf_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_fpts; B_row_starts = B_col_starts = fpts_starts; } else if (row_set == -1 && col_set == 1) { /* FC */ B_nrow_local = nf_local; B_ncol_local = nc_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_cpts; B_row_starts = fpts_starts; B_col_starts = cpts_starts; } else if (row_set == 1 && col_set == -1) { /* CF */ B_nrow_local = nc_local; B_ncol_local = nf_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_fpts; B_row_starts = cpts_starts; B_col_starts = fpts_starts; } else { /* CC */ B_nrow_local = nc_local; B_ncol_local = nc_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_cpts; B_row_starts = B_col_starts = cpts_starts; } /* global index of my first col */ B_first_col = B_col_starts[0]; /* sub_idx_diag: [local] mapping from F+C to F/C; entries not selected are -1 */ sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ?
1 : -1; if (CF_i == col_set) { sub_idx_diag[i] = k++; } else { sub_idx_diag[i] = -1; } } hypre_assert(k == B_ncol_local); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); k = 0; for (i = 0; i < num_sends; i++) { /* start pos of elements sent to send_proc[i] */ HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); /* loop through all elems to send_proc[i] */ for (j = si; j < ei; j++) { /* j1: local idx */ HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; if (j1 != -1) { /* adjust j1 to B global idx */ j1 += B_first_col; } send_buf_data[k++] = j1; } } hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); /* recv buffer */ sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); /* create a handle to start communication. 11: for integer */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd); /* destroy the handle to finish communication */ hypre_ParCSRCommHandleDestroy(comm_handle); for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { num_cols_B_offd ++; } } col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { col_map_offd_B[k] = sub_idx_offd[i]; sub_idx_offd[i] = k++; } } hypre_assert(k == num_cols_B_offd); /* count nnz and set ia */ B_nnz_diag = B_nnz_offd = 0; B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST); B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST); B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST); B_diag_i[0] = B_offd_i[0] = 0; for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1; if (CF_i != row_set) { continue; } k++; // Get max abs-value element of this row HYPRE_Real temp_max = 0; if (strength_thresh > 0) { for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++) { if (hypre_cabs(A_diag_a[j]) > temp_max) { temp_max = hypre_cabs(A_diag_a[j]); } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { if (hypre_cabs(A_offd_a[j]) > temp_max) { temp_max = hypre_cabs(A_offd_a[j]); } } } B_maxel_row[k-1] = temp_max; // add one for diagonal element j = A_diag_i[i]; if (sub_idx_diag[A_diag_j[j]] != -1) { B_nnz_diag++; } // Count nnzs larger than tolerance times max row element for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++) { if ( (sub_idx_diag[A_diag_j[j]] != -1) && (hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) ) { B_nnz_diag++; } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { if ( (sub_idx_offd[A_offd_j[j]] != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) ) { B_nnz_offd++; } } B_diag_i[k] = B_nnz_diag; B_offd_i[k] = B_nnz_offd; } hypre_assert(k == B_nrow_local); B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST); B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST); B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST); B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST); for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i != row_set) { continue; } HYPRE_Real maxel = B_maxel_row[k]; k++; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]]; if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) ) { B_diag_j[k1] = j1; B_diag_a[k1] = A_diag_a[j]; k1++; } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]]; if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel))) { hypre_assert(j1 >= 0 && j1 < num_cols_B_offd); B_offd_j[k2] = j1; B_offd_a[k2] = A_offd_a[j]; k2++; } } } hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd); /* ready to create B = A(rowset, colset) */ B = hypre_ParCSRMatrixCreate(comm, B_nrow_global, B_ncol_global, B_row_starts, B_col_starts, num_cols_B_offd, B_nnz_diag, B_nnz_offd); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_diag) = B_diag_a; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_offd) = B_offd_a; hypre_CSRMatrixI(B_offd) = B_offd_i; hypre_CSRMatrixJ(B_offd) = B_offd_j; hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; hypre_ParCSRMatrixSetNumNonzeros(B); hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B); hypre_MatvecCommPkgCreate(B); *B_ptr = B; hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST); hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; }
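/* Usage sketch (illustrative only, not part of the library source): with a CF splitting in CF_marker (positive entries = C points, negative = F points) and the length-2 C-point partitioning cpts_starts, the F-C coupling block of A with a 0.25 relative drop tolerance could be extracted as: hypre_ParCSRMatrix *A_FC; hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_starts, "FC", &A_FC, 0.25); a strength_thresh of 0.0 keeps every stored entry with nonzero magnitude. */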
#include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /*-------------------------------------------------------------------------- * hypre_ParMatmul_RowSizes: * * Computes sizes of C rows. Formerly part of hypre_ParMatmul but removed * so it can also be used for multiplication of Boolean matrices. * * Arrays computed: C_diag_i, C_offd_i. * * Arrays needed: (14, all HYPRE_Int*) * rownnz_A, * A_diag_i, A_diag_j, * A_offd_i, A_offd_j, * B_diag_i, B_diag_j, * B_offd_i, B_offd_j, * B_ext_diag_i, B_ext_diag_j, * B_ext_offd_i, B_ext_offd_j, * map_B_to_C. * * Scalars computed: C_diag_size, C_offd_size. * * Scalars needed: * num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, * num_cols_diag_B, num_cols_offd_B, num_cols_offd_C *--------------------------------------------------------------------------*/ void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location, HYPRE_Int **C_diag_i, HYPRE_Int **C_offd_i, HYPRE_Int *rownnz_A, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Int *A_offd_i, HYPRE_Int *A_offd_j, HYPRE_Int *B_diag_i, HYPRE_Int *B_diag_j, HYPRE_Int *B_offd_i, HYPRE_Int *B_offd_j, HYPRE_Int *B_ext_diag_i, HYPRE_Int *B_ext_diag_j, HYPRE_Int *B_ext_offd_i, HYPRE_Int *B_ext_offd_j, HYPRE_Int *map_B_to_C, HYPRE_Int *C_diag_size, HYPRE_Int *C_offd_size, HYPRE_Int num_rownnz_A, HYPRE_Int num_rows_diag_A, HYPRE_Int num_cols_offd_A, HYPRE_Int allsquare, HYPRE_Int num_cols_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_Int num_cols_offd_C ) { HYPRE_Int *jj_count_diag_array; HYPRE_Int *jj_count_offd_array; HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */ HYPRE_Int num_threads = hypre_NumThreads(); *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location); *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location); jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of A *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int size, rest, num_threads; HYPRE_Int ii, ns, ne; num_threads = hypre_NumActiveThreads(); size = num_rownnz_A/num_threads; rest = num_rownnz_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { ii1 = i1; /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices).
*--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++) { i2 = A_offd_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++) { i2 = A_diag_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of B_offd. *-----------------------------------------------------------*/ if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } } } /*-------------------------------------------------------------------- * Set C_diag_i and C_offd_i for this row. *--------------------------------------------------------------------*/ (*C_diag_i)[ii1] = jj_row_begin_diag; (*C_offd_i)[ii1] = jj_row_begin_offd; } jj_count_diag_array[ii] = jj_count_diag; jj_count_offd_array[ii] = jj_count_offd; hypre_TFree(B_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct diag_i and offd_i - phase 1 */ if (ii) { jj_count_diag = jj_count_diag_array[0]; jj_count_offd = jj_count_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { jj_count_diag += jj_count_diag_array[i1]; jj_count_offd += jj_count_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { ii1 = rownnz_A ? 
rownnz_A[i1] : i1; (*C_diag_i)[ii1] += jj_count_diag; (*C_offd_i)[ii1] += jj_count_offd; } } else { (*C_diag_i)[num_rows_diag_A] = 0; (*C_offd_i)[num_rows_diag_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1]; (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1]; } } /* Correct diag_i and offd_i - phase 2 */ if (rownnz_A != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i1 = ns; i1 < (ne-1); i1++) { for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1+1]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1+1]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1+1]]; } } if (ii < (num_threads - 1)) { for (ii1 = rownnz_A[ne-1] + 1; ii1 < rownnz_A[ne]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]]; } } else { for (ii1 = rownnz_A[ne-1] + 1; ii1 < num_rows_diag_A; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A]; (*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A]; } } } } /* end parallel loop */ *C_diag_size = (*C_diag_i)[num_rows_diag_A]; *C_offd_size = (*C_offd_i)[num_rows_diag_A]; #ifdef HYPRE_DEBUG HYPRE_Int i; for (i = 0; i < num_rows_diag_A; i++) { hypre_assert((*C_diag_i)[i] <= (*C_diag_i)[i+1]); hypre_assert((*C_offd_i)[i] <= (*C_offd_i)[i+1]); } #endif hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST); /* End of First Pass */ } /*-------------------------------------------------------------------------- * hypre_ParMatmul: * * Multiplies two ParCSRMatrices A and B and returns the product in * ParCSRMatrix C. * * Note: C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime(); #endif /* ParCSRMatrix A */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_rownnz_A; HYPRE_Int *rownnz_A = NULL; /* ParCSRMatrix B */ HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_BigInt last_col_diag_B; /* A_diag */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* A_offd */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); /* B_diag */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); 
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); /* B_offd */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); /* ParCSRMatrix C */ hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *map_B_to_C = NULL; /* C_diag */ hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; /* C_offd */ hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Int C_diag_size; /* Bs_ext */ hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_BigInt *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_BigInt *B_big_offd_j = NULL; HYPRE_Int B_ext_offd_size; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } /* if C=A*B is square globally and locally, then C_diag should be square also */ if ( num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B ) { allsquare = 1; } /* Set rownnz of A */ if (num_rownnz_diag_A != num_rows_diag_A && num_rownnz_offd_A != num_rows_offd_A ) { hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir, num_rownnz_offd_A, A_offd_ir, &num_rownnz_A, &rownnz_A); } else { num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A); } /*----------------------------------------------------------------------- * Extract B_ext, i.e. 
portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i = ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads()); } #pragma omp barrier cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]); B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } HYPRE_Int 
i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&set); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) { //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]); } } if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_BigInt *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i = ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } if 
(B_ext_offd_size || num_cols_offd_B) { temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { HYPRE_BigInt value; hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = ns; i < ne; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) { B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j], //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C); } } } /* end parallel region */ hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i, rownnz_A, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. 
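* During this pass, B_marker maps each column of C already seen in the
* current row to its position in C_diag/C_offd, so repeated contributions
* are accumulated in place.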
*-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize some stuff. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, size, rest, ii; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int num_threads; HYPRE_Complex a_entry; /*, a_b_product;*/ num_threads = hypre_NumActiveThreads(); size = num_rownnz_A/num_threads; rest = num_rownnz_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = C_diag_i[rownnz_A ? rownnz_A[ns] : ns]; jj_count_offd = C_offd_i[rownnz_A ? rownnz_A[ns] : ns]; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { ii1 = i1; /*-------------------------------------------------------------------- * Create diagonal entry, C_{i1,i1} *--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; C_diag_data[jj_count_diag] = zero; C_diag_j[jj_count_diag] = i1; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1+1]; jj2++) { i2 = A_offd_j[jj2]; a_entry = A_offd_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3]; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3]; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. 
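* (Each entry a_{ii1,i2} scales row i2 of B_diag and B_offd; this is the
* row-by-row product of Gustavson's algorithm.)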
*-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1+1]; jj2++) { i2 = A_diag_j[jj2]; a_entry = A_diag_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3]; } } if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3]; } } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size); /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C, 0); hypre_ParCSRMatrixSetColStartsOwner(C, 0); C_diag = hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_data; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; hypre_CSRMatrixSetRownnz(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_CSRMatrixData(C_offd) = C_offd_data; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; } hypre_CSRMatrixSetRownnz(C_offd); hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C; hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST); if (B_ext_diag_size) { hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST); } hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST); if (B_ext_offd_size) { hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime(); #endif return C; } /* The following function was formerly part of hypre_ParCSRMatrixExtractBExt but the code was removed so it can be used for a corresponding function for Boolean matrices JSP: to allow communication overlapping, it returns comm_handle_idx and comm_handle_data. 
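   This lets the caller overlap local computation with the outstanding
   index and data exchanges.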
Before accessing B, they should be destroyed (including send_data contained in the comm_handle). */ void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( HYPRE_Int ** pB_ext_i, HYPRE_BigInt ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_BigInt ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_BigInt first_col_diag, HYPRE_BigInt * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_BigInt * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, /* 1 if only coarse points are needed */ HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */ // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix // other interpolation: skip_fine = 0, skip_same_sign = 0 ) { hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL; hypre_ParCSRCommPkg *tmp_comm_pkg; HYPRE_Int *B_int_i; HYPRE_BigInt *B_int_j; HYPRE_Int *B_ext_i; HYPRE_BigInt * B_ext_j; HYPRE_Complex * B_ext_data; HYPRE_Complex * B_int_data; HYPRE_BigInt * B_int_row_map; HYPRE_BigInt * B_ext_row_map; HYPRE_Int num_procs, my_id; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i, j, k; HYPRE_Int start_index; /*HYPRE_Int jrow;*/ HYPRE_Int num_rows_B_ext; HYPRE_Int *prefix_sum_workspace; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); HYPRE_BigInt first_row_index = row_starts[0]; num_rows_B_ext = recv_vec_starts[num_recvs]; if ( num_rows_B_ext < 0 ) { /* no B_ext, no communication */ *pB_ext_i = NULL; *pB_ext_j = NULL; if ( data ) *pB_ext_data = NULL; if ( find_row_map ) *pB_ext_row_map = NULL; *num_nonzeros = 0; return; }; B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST); B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST); *pB_ext_i = B_ext_i; if ( find_row_map ) { B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends]+1 , HYPRE_MEMORY_HOST); B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext+1 , HYPRE_MEMORY_HOST); *pB_ext_row_map = B_ext_row_map; }; /*-------------------------------------------------------------------------- * generate B_int_i through adding number of row-elements of offd and diag * for corresponding rows. 
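* The counts are stored shifted by one so that they can be communicated
* as-is and prefix-summed into CSR row pointers on the receiving side: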
B_int_i[j+1] contains the number of elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); jdata_send_map_starts[0] = B_int_i[0] = 0; /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,k) #endif { /*HYPRE_Int counts[num_sends];*/ HYPRE_Int *counts; counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST); for (i=0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = 0; if (skip_fine && skip_same_sign) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (offd_data[k] < 0) len++; } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (offd_data[k] > 0) len++; } } B_int_i[j + 1] = len; count += len; } } else if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) len++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) len++; } B_int_i[j + 1] = len; count += len; } } else { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow]; len += offd_i[jrow + 1] - offd_i[jrow]; B_int_i[j + 1] = len; count += len; } } if (find_row_map) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index; } } counts[i] = count; } hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace); #ifdef HYPRE_USING_OPENMP #pragma omp master #endif { for (i = 1; i < num_sends; i++) { jdata_send_map_starts[i + 1] += jdata_send_map_starts[i]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg, &B_int_i[1],&(B_ext_i[1]) ); if ( find_row_map ) { /* scatter/gather B_int row numbers to form array of B_ext row numbers */ row_map_comm_handle = hypre_ParCSRCommHandleCreate (21,comm_pkg, B_int_row_map, B_ext_row_map ); } B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = 0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = counts[i] + jdata_send_map_starts[i]; if 
(data) { if (skip_same_sign && skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; /*HYPRE_Int count_begin = count;*/ if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_BigInt c_global = col_map_offd[c]; if (offd_data[k] < 0) { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_BigInt c_global = col_map_offd[c]; if (offd_data[k] > 0) { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; B_int_data[count] = diag_data[k]; count++; } for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; B_int_data[count] = offd_data[k]; count++; } } } } // data else { if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow+1]; k++) { B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag; count++; } for (k = offd_i[jrow]; k < offd_i[jrow+1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } // !data } /* for each send target */ hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* omp parallel. JSP: this takes most of time in this function */ hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange B_ext_i[j+1] contains the number of elements * of a row j ! 
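* A running sum below turns these counts into proper CSR row pointers.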
* evaluate B_ext_i and compute *num_nonzeros for B_ext *--------------------------------------------------------------------------*/ for (i = 0; i < num_recvs; i++) { for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { B_ext_i[j+1] += B_ext_i[j]; } } *num_nonzeros = B_ext_i[num_rows_B_ext]; *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST); B_ext_j = *pB_ext_j; if (data) { *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST); B_ext_data = *pB_ext_data; } for (i = 0; i < num_recvs; i++) { start_index = B_ext_i[recv_vec_starts[i]]; *num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index; jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]]; } hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts; *comm_handle_idx = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,B_int_j,B_ext_j); if (data) { *comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data, B_ext_data); } if (row_map_comm_handle) { hypre_ParCSRCommHandleDestroy(row_map_comm_handle); row_map_comm_handle = NULL; } hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(B_int_i, HYPRE_MEMORY_HOST); if ( find_row_map ) hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST); /* end generic part */ } void hypre_ParCSRMatrixExtractBExt_Arrays( HYPRE_Int ** pB_ext_i, HYPRE_BigInt ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_BigInt ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_BigInt first_col_diag, HYPRE_BigInt * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_BigInt * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data ) { hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros, data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx, HYPRE_MEMORY_HOST); if (data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data, HYPRE_MEMORY_HOST); } } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on * other processors and needed for multiplication with A locally. The rows * are returned as CSRMatrix. 
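* One row is received for every off-processor column of A, in the order
* given by the receive part of A's communication package.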
*--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, HYPRE_Int skip_same_sign ) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B); /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/ HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_recvs; HYPRE_Int *recv_vec_starts; HYPRE_Int num_sends; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Real *diag_data = hypre_CSRMatrixData(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Real *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int num_cols_B, num_nonzeros; HYPRE_Int num_rows_B_ext; hypre_CSRMatrix *B_ext; HYPRE_Int *B_ext_i; HYPRE_BigInt *B_ext_j; HYPRE_Complex *B_ext_data; HYPRE_BigInt *idummy; /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } comm_pkg = hypre_ParCSRMatrixCommPkg(A); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); num_rows_B_ext = recv_vec_starts[num_recvs]; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy, &num_nonzeros, data, 0, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, B->row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, comm_handle_idx, comm_handle_data, CF_marker, CF_marker_offd, skip_fine, skip_same_sign ); B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros); hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_ext) = B_ext_i; hypre_CSRMatrixBigJ(B_ext) = B_ext_j; if (data) hypre_CSRMatrixData(B_ext) = B_ext_data; return B_ext; } hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { #if 0 hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx, HYPRE_MEMORY_HOST); if (want_data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data, HYPRE_MEMORY_HOST); } #else hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); hypre_CSRMatrix *B_ext; void *request; if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ParcsrGetExternalRowsInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, &request); B_ext = hypre_ParcsrGetExternalRowsWait(request); #endif return B_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_ParCSRCommHandle *comm_handle; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A); HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, num_recvs, num_cols_offd_AT; HYPRE_Int i, j, k, index, counter, j_row; HYPRE_BigInt value; hypre_ParCSRMatrix *AT; hypre_CSRMatrix *AT_diag; hypre_CSRMatrix *AT_offd; hypre_CSRMatrix *AT_tmp; HYPRE_BigInt first_row_index_AT, first_col_diag_AT; HYPRE_Int local_num_rows_AT, local_num_cols_AT; HYPRE_Int *AT_tmp_i; HYPRE_Int *AT_tmp_j; HYPRE_BigInt *AT_big_j = NULL; HYPRE_Complex *AT_tmp_data; HYPRE_Int *AT_buf_i; HYPRE_BigInt *AT_buf_j; HYPRE_Complex *AT_buf_data; HYPRE_Int *AT_offd_i; HYPRE_Int *AT_offd_j; HYPRE_Complex *AT_offd_data; HYPRE_BigInt *col_map_offd_AT; HYPRE_BigInt *row_starts_AT; HYPRE_BigInt *col_starts_AT; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *recv_vec_starts; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int *tmp_recv_vec_starts; HYPRE_Int *tmp_send_map_starts; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_cols_offd_AT = 0; counter = 0; AT_offd_j = NULL; AT_offd_data = NULL; col_map_offd_AT = NULL; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data); AT_tmp_i = hypre_CSRMatrixI(AT_tmp); AT_tmp_j = hypre_CSRMatrixJ(AT_tmp); if (data) { AT_tmp_data = hypre_CSRMatrixData(AT_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (AT_tmp_i[num_cols_offd]) { AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST); } 
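/* convert the local column indices of the transposed offd part (= local
   row indices of A) into global row numbers before they are exchanged */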
for (i = 0; i < AT_tmp_i[num_cols_offd]; i++) { //AT_tmp_j[i] += first_row_index; AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index; } for (i = 0; i < num_cols_offd; i++) { AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i]; } comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i); } hypre_CSRMatrixTranspose(A_diag, &AT_diag, data); AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = send_map_starts[0]; for (i = 0; i < num_sends; i++) { tmp_send_map_starts[i+1] = tmp_send_map_starts[i]; for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { tmp_send_map_starts[i+1] += AT_buf_i[j]; AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j]; } } for (i = 0; i < num_cols; i++) { AT_offd_i[i+1] += AT_offd_i[i]; } tmp_recv_vec_starts[0] = recv_vec_starts[0]; for (i = 0; i < num_recvs; i++) { tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i]; for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { tmp_recv_vec_starts[i+1] += AT_tmp_i[j]; } } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); if (data) { AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(AT_tmp); if (AT_offd_i[num_cols]) { AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location); AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST); if (data) { AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location); } } else { AT_offd_j = NULL; AT_offd_data = NULL; } counter = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { j_row = send_map_elmts[j]; index = AT_offd_i[j_row]; for (k = 0; k < AT_buf_i[j]; k++) { if (data) { AT_offd_data[index] = AT_buf_data[counter]; } AT_big_j[index++] = AT_buf_j[counter++]; } AT_offd_i[j_row] = index; } } for (i = num_cols; i > 0; i--) { AT_offd_i[i] = AT_offd_i[i-1]; } AT_offd_i[0] = 0; if (counter) { hypre_BigQsort0(AT_buf_j,0,counter-1); num_cols_offd_AT = 1; value = AT_buf_j[0]; for (i = 1; i < counter; i++) { if (value < AT_buf_j[i]) { AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i]; value = AT_buf_j[i]; } } } if (num_cols_offd_AT) { col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST); } else { 
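/* A^T has no off-processor couplings on this rank */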
col_map_offd_AT = NULL; } for (i = 0; i < num_cols_offd_AT; i++) { col_map_offd_AT[i] = AT_buf_j[i]; } hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST); hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST); if (data) { hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < counter; i++) { AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT); } hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); } AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter); hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location; hypre_CSRMatrixI(AT_offd) = AT_offd_i; hypre_CSRMatrixJ(AT_offd) = AT_offd_j; hypre_CSRMatrixData(AT_offd) = AT_offd_data; row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { row_starts_AT[i] = col_starts[i]; } if (row_starts != col_starts) { col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); for (i = 0; i < 2; i++) { col_starts_AT[i] = row_starts[i]; } } else { col_starts_AT = row_starts_AT; } first_row_index_AT = row_starts_AT[0]; first_col_diag_AT = col_starts_AT[0]; local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT ); local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT); AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(AT) = comm; hypre_ParCSRMatrixDiag(AT) = AT_diag; hypre_ParCSRMatrixOffd(AT) = AT_offd; hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT; hypre_ParCSRMatrixColStarts(AT) = col_starts_AT; hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT; hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT; hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1; hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1; hypre_ParCSRMatrixOwnsData(AT) = 1; hypre_ParCSRMatrixOwnsRowStarts(AT) = 1; hypre_ParCSRMatrixOwnsColStarts(AT) = 1; if (row_starts_AT == col_starts_AT) { hypre_ParCSRMatrixOwnsColStarts(AT) = 0; } hypre_ParCSRMatrixCommPkg(AT) = NULL; hypre_ParCSRMatrixCommPkgT(AT) = NULL; hypre_ParCSRMatrixRowindices(AT) = NULL; hypre_ParCSRMatrixRowvalues(AT) = NULL; hypre_ParCSRMatrixGetrowactive(AT) = 0; hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1; *AT_ptr = AT; return ierr; } /* ----------------------------------------------------------------------------- * generate a parallel spanning tree (for Maxwell Equation) * G_csr is the node to edge connectivity matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_BigInt nrows_G, ncols_G; HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = 
hypre_CSRMatrixI(G_diag);
      G_diag_j = hypre_CSRMatrixJ(G_diag);
   }
   else
   {
      nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
      ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
      G_diag = hypre_ParCSRMatrixDiag(G_csr);
      T_diag_i = hypre_CSRMatrixI(G_diag);
      T_diag_j = hypre_CSRMatrixJ(G_diag);
      counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
      for (i = 0; i < nrows_G; i++) counts[i] = 0;
      for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++;
      G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST);
      G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST);
      G_diag_i[0] = 0;
      for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
      for (i = 0; i < ncols_G; i++)
      {
         for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++)
         {
            k = T_diag_j[j];
            offset = G_diag_i[k]++;
            G_diag_j[offset] = i;
         }
      }
      G_diag_i[0] = 0;
      for (i = 1; i <= nrows_G; i++)
      {
         G_diag_i[i] = G_diag_i[i-1] + counts[i-1];
      }
      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   }

   /* form G transpose in special form (2 nodes per edge max) */
   GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST);
   for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1;
   for (i = 0; i < nrows_G; i++)
   {
      for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++)
      {
         edge = G_diag_j[j];
         if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i;
         else                           GT_diag_mat[edge*2+1] = i;
      }
   }

   /* BFS on the local matrix graph to find tree */
   nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
   edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0;
   for (i = 0; i < ncols_G; i++) edges_marked[i] = 0;
   queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST);
   queue_head = 0;
   queue_tail = 1;
   queue[0] = 0;
   nodes_marked[0] = 1;
   while ((queue_tail-queue_head) > 0)
   {
      node = queue[queue_tail-1];
      queue_tail--;
      for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++)
      {
         edge = G_diag_j[i];
         if (edges_marked[edge] == 0)
         {
            if (GT_diag_mat[2*edge+1] != -1)
            {
               node2 = GT_diag_mat[2*edge];
               if (node2 == node) node2 = GT_diag_mat[2*edge+1];
               if (nodes_marked[node2] == 0)
               {
                  nodes_marked[node2] = 1;
                  edges_marked[edge] = 1;
                  queue[queue_tail] = node2;
                  queue_tail++;
               }
            }
         }
      }
   }
   hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
   hypre_TFree(queue, HYPRE_MEMORY_HOST);
   hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST);

   /* fetch the communication information from G_csr */
   comm = hypre_ParCSRMatrixComm(G_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);
   comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
   if (nprocs == 1 && comm_pkg == NULL)
   {
      hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr);
      comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
   }

   /* construct processor graph based on node-edge connection */
   /* (local edges connected to neighbor processor nodes)     */
   n_children = 0;
   nrecvs = nsends = 0;
   if (nprocs > 1)
   {
      nsends     = hypre_ParCSRCommPkgNumSends(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      nrecvs     = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      proc_array = NULL;
      if ((nsends+nrecvs) > 0)
      {
         n_proc_array = 0;
         proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST);
         for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i];
         for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i];
         hypre_qsort0(proc_array, 0, nsends+nrecvs-1);
         n_proc_array = 1;
         for (i = 1; i < nrecvs+nsends; i++)
            if (proc_array[i] != proc_array[n_proc_array-1])
               proc_array[n_proc_array++] = proc_array[i];
      }
      pgraph_i = hypre_TAlloc(HYPRE_Int,
(nprocs+1) , HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. 
But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) { hypre_TFree(children, HYPRE_MEMORY_HOST); } /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; hypre_TFree(edges_marked, HYPRE_MEMORY_HOST); if (G_type != 0) { hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) { proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; } /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) 
exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; else nnz12++; } } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz21++; else nnz22++; } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; /* This case is not yet implemented! */ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; 
hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* 
----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *A_offd_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_a = hypre_CSRMatrixData(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ 
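/* Illustration of the exp_indices encoding set up above (example values, not
   from the library): with nrows_A = 5 and indices = {1, 3}, the loops produce
   exp_indices = {-1, 0, -2, 1, -3}. A nonnegative entry p means the row
   belongs to the selected block with new index p; a negative entry encodes
   the complement index, recovered as -exp_indices[i] - 1, so rows 0, 2, 4
   map to positions 0, 1, 2 of the complement block. */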
nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { /* A21 keeps whole rows, so count every diag entry to match the fill loop below */ nnz21 += A_diag_i[i+1] - A_diag_i[i]; nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_offd_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = 
A_diag_j[j]; diag_a[nnz++] = A_diag_a[j]; } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_offd_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A21_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the matrix * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A ) { hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A ); hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A ); return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixAminvDB * computes C = A - inv(D)*B, where D is a diagonal matrix * Note: Data structure of A is expected to be a subset of data structure of B! 
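 * A worked sketch (illustrative values, not from the hypre docs): with
 * d = {2.0, 4.0}, row 0 of C is row 0 of A minus (1/2) times row 0 of B, and
 * row 1 of C is row 1 of A minus (1/4) times row 1 of B. The code below
 * realizes this by cloning the structure of B into C and merging in the rows
 * of A, with D_tmp[i] = 1/d[i] as the per-row scaling.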
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_ParCSRMatrix *C = NULL; HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_sends_B, num_recvs_B; HYPRE_Int i, j, cnt; HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_offd = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs_B; HYPRE_Int *send_procs_B; HYPRE_Int *recv_vec_starts_B; HYPRE_Int *send_map_starts_B; HYPRE_Int *send_map_elmts_B; hypre_ParCSRCommPkg *comm_pkg_C; HYPRE_Int *recv_procs_C; HYPRE_Int *send_procs_C; HYPRE_Int *recv_vec_starts_C; HYPRE_Int *send_map_starts_C; HYPRE_Int *send_map_elmts_C; HYPRE_Int *map_to_B; /*HYPRE_Int *C_diag_array; HYPRE_Int *C_offd_array;*/ HYPRE_Complex *D_tmp; HYPRE_Int size, rest, num_threads, ii; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for B, a CommPkg is generated *--------------------------------------------------------------------*/ if (!comm_pkg_B) { hypre_MatvecCommPkgCreate(B); comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); } C = hypre_ParCSRMatrixClone(B, 0); /*hypre_ParCSRMatrixInitialize(C);*/ C_diag = hypre_ParCSRMatrixDiag(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); size = num_rows/num_threads; rest = num_rows - size*num_threads; D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); cnt = 0; for (i=0; i < num_cols_offd_A; i++) { while (col_map_offd_B[cnt] < col_map_offd_A[i]) { cnt++; } map_to_B[i] = cnt; cnt++; } } #ifdef HYPRE_USING_OPENMP 
#pragma omp parallel for private(ii, i, j) #endif for (ii=0; ii < num_threads; ii++) { HYPRE_Int *A_marker = NULL; HYPRE_Int ns, ne, A_col, num_cols, nmax; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } nmax = hypre_max(num_rows, num_cols_offd_B); A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) { A_marker[i] = -1; } for (i = ns; i < ne; i++) { D_tmp[i] = 1.0/d[i]; } num_cols = C_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_col = A_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = A_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] += A_diag_data[j]; } } for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++) { A_col = B_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j]; } } } for (i = 0; i < num_cols_offd_B; i++) { A_marker[i] = -1; } num_cols = C_offd_i[ns]; for (i = ns; i < ne; i++) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_col = map_to_B[A_offd_j[j]]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = A_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] += A_offd_data[j]; } } for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++) { A_col = B_offd_j[j]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j]; } } } hypre_TFree(A_marker, HYPRE_MEMORY_HOST); } /* end parallel region */ /*for (i=0; i < num_cols_offd_B; i++) col_map_offd_C[i] = col_map_offd_B[i]; */ num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B); num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B); recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B); recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B); send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B); send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B); send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B); recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST); recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST); send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST); send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST); send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST); for (i=0; i < num_recvs_B; i++) recv_procs_C[i] = recv_procs_B[i]; for (i=0; i < num_recvs_B+1; i++) recv_vec_starts_C[i] = recv_vec_starts_B[i]; for (i=0; i < num_sends_B; i++) send_procs_C[i] = send_procs_B[i]; for (i=0; i < num_sends_B+1; i++) send_map_starts_C[i] = send_map_starts_B[i]; for (i=0; i < send_map_starts_B[num_sends_B]; i++) send_map_elmts_C[i] = send_map_elmts_B[i]; comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_C) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B; hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C; hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B; hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = 
send_procs_C; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C; hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C; hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C; hypre_TFree(D_tmp, HYPRE_MEMORY_HOST); if (num_cols_offd_A) hypre_TFree(map_to_B, HYPRE_MEMORY_HOST); *C_ptr = C; return (hypre_error_flag); } /*-------------------------------------------------------------------------- * hypre_ParTMatmul: * * Multiplies two ParCSRMatrices transpose(A) and B and returns * the product in ParCSRMatrix C * * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_BigInt first_col_diag_C; HYPRE_BigInt last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_BigInt *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_BigInt *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_BigInt nrows_A, ncols_A; HYPRE_BigInt nrows_B, ncols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_BigInt value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_BigInt first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = 
hypre_NumThreads(); if (nrows_A != nrows_B || num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; void *request; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request); C_ext = hypre_ExchangeExternalRowsWait(request); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixBigJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0; } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { temp[C_ext_offd_size++] = C_ext_j[j]; } else { C_ext_diag_size++; } } C_ext_diag_i[i+1] = C_ext_diag_size; C_ext_offd_i[i+1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { hypre_BigQsort0(temp,0,cnt-1); value = temp[0]; num_cols_offd_C = 1; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; 
temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST); } C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i+1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C); C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute: * C_diag = C_tmp_diag + C_ext_diag * C_offd = C_tmp_offd + C_ext_offd * * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A/num_threads; rest = num_cols_diag_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; 
j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } } } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads-1; ik++) { C_diag_array[ik+1] += C_diag_array[ik]; C_offd_array[ik+1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads-1]; nnz_o = C_offd_array[num_threads-1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! * Now fill in values *-----------------------------------------------------------------------*/ for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } /*----------------------------------------------------------------------- * Populate matrices *-----------------------------------------------------------------------*/ nnz_d = 0; nnz_o = 0; if (ii) { nnz_d = C_diag_array[ii-1]; nnz_o = C_offd_array[ii-1]; } for (ik = ns; ik < ne; ik++) { C_diag_i[ik] = nnz_d; C_offd_i[ik] = nnz_o; for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++) { jcol = C_tmp_diag_j[jk]; C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_tmp_diag_data[jk]; B_marker[jcol] = nnz_d; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++) { jcol = C_tmp_offd_j[jk]; C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_tmp_offd_data[jk]; B_marker_offd[jcol] = nnz_o; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < C_diag_i[ik]) { C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_ext_diag_data[j2]; B_marker[jcol] = nnz_d; nnz_d++; } else { C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2]; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < C_offd_i[ik]) { C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_ext_offd_data[j2]; B_marker_offd[jcol] = nnz_o; nnz_o++; } else { C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2]; } } break; } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST); } /*C = hypre_ParCSRMatrixCreate(comm, ncols_A, ncols_B, col_starts_A, col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd); 
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */ /* row_starts[0] is start of local rows. row_starts[1] is start of next processor's rows */ first_row_index = col_starts_A[0]; local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index ); first_col_diag = col_starts_B[0]; local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = comm; hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A; hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B; hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index; hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(C) = NULL; hypre_ParCSRMatrixAssumedPartition(C) = NULL; hypre_ParCSRMatrixRowStarts(C) = col_starts_A; hypre_ParCSRMatrixColStarts(C) = col_starts_B; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(C) = 1; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C,0); hypre_ParCSRMatrixSetColStartsOwner(C,0); if (C_diag) { hypre_CSRMatrixSetRownnz(C_diag); hypre_ParCSRMatrixDiag(C) = C_diag; } else { hypre_ParCSRMatrixDiag(C) = C_tmp_diag; } if (C_offd) { hypre_CSRMatrixSetRownnz(C_offd); hypre_ParCSRMatrixOffd(C) = C_offd; } else { hypre_ParCSRMatrixOffd(C) = C_tmp_offd; } hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C; if (num_cols_offd_C) { HYPRE_Int jj_count_offd, nnz_offd; HYPRE_BigInt *new_col_map_offd_C = NULL; P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd_C; i++) { P_marker[i] = -1; } jj_count_offd = 0; nnz_offd = C_offd_i[num_cols_diag_A]; for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; if (P_marker[i1]) { P_marker[i1] = 0; jj_count_offd++; } } if (jj_count_offd < num_cols_offd_C) { new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_count_offd = 0; for (i = 0; i < num_cols_offd_C; i++) { if (!P_marker[i]) { P_marker[i] = jj_count_offd; new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i]; } } for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; C_offd_j[i] = P_marker[i1]; } num_cols_offd_C = jj_count_offd; hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST); col_map_offd_C = new_col_map_offd_C; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST); } if (C_ext_diag_size) { hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } if (C_diag) { 
hypre_CSRMatrixDestroy(C_tmp_diag); } if (C_offd) { hypre_CSRMatrixDestroy(C_tmp_offd); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE ) { hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C)); hypre_SyncCudaComputeStream(hypre_handle()); } #endif return C; } HYPRE_Int hypre_ParvecBdiagInvScal( hypre_ParVector *b, HYPRE_Int blockSize, hypre_ParVector **bs, hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParVectorComm(b); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, s, block_start, block_end; HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b); HYPRE_BigInt first_row = hypre_ParVectorFirstIndex(b); HYPRE_BigInt last_row = hypre_ParVectorLastIndex(b); HYPRE_BigInt end_row = last_row + 1; /* one past-the-last */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global ); hypre_assert(blockSize == A->bdiag_size); HYPRE_Complex *bdiaginv = A->bdiaginv; hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg; HYPRE_Complex *dense = bdiaginv; //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); /* local vector of b */ hypre_Vector *b_local = hypre_ParVectorLocalVector(b); HYPRE_Complex *b_local_data = hypre_VectorData(b_local); /* number of sends (#procs) */ HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); hypre_ParCSRCommHandle *comm_handle; HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(part, hypre_ParVectorPartitioning(b), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), part ); hypre_ParVectorInitialize(bnew); hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew); HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local); /* send and recv b */ HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST); HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows_send; i++) { j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_b[i] = b_local_data[j]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b); /* ... 
*/ hypre_ParCSRCommHandleDestroy(comm_handle); for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); for (big_i = block_start; big_i < block_end; big_i++) { if (big_i < first_row || big_i >= end_row) { continue; } HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); bnew_local_data[local_i] = 0.0; for (j = 0; j < s; j++) { HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); bnew_local_data[local_i] += val * b_local_data[rid]; } else { HYPRE_Int rid; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } bnew_local_data[local_i] += val * recv_b[rid]; } } } dense += blockSize * blockSize; } hypre_TFree(send_b, HYPRE_MEMORY_HOST); hypre_TFree(recv_b, HYPRE_MEMORY_HOST); *bs = bnew; return hypre_error_flag; } /** * @brief Compute As = B^{-1}*A, where B is the block diagonal of A * @param[in] A : input matrix * @param[in] blockSize: block size * @param[out] As : product inv(B)*A * @return * @warning */ HYPRE_Int hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A, HYPRE_Int blockSize, hypre_ParCSRMatrix **As) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, k, s; HYPRE_BigInt block_start, block_end; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A); HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */ HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */ HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local; HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); void *request; /* if square globally and locally */ HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) && (first_row == first_col); if (nrow_global != ncol_global) { hypre_printf("hypre_ParcsrBdiagInvScal: only supports N_ROW == N_COL\n"); return hypre_error_flag; } /* in block diagonals, row range of the blocks this proc spans */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );
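/* Worked example of the block-range arithmetic above (illustrative numbers):
   with blockSize = 4, first_row = 6, last_row = 13 and nrow_global >= 16,
   first_row_block = (6/4)*4 = 4 and end_row_block = ((13/4)+1)*4 = 16, so
   this proc participates in the blocks [4,8), [8,12) and [12,16) even though
   it only owns rows [6,14). */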
HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize); //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); //return 0; /* number of external rows */ HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row)); HYPRE_BigInt *ext_indices; HYPRE_Int A_ext_nnz; hypre_CSRMatrix *A_ext = NULL; HYPRE_Complex *A_ext_a = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST); HYPRE_Real *dense = dense_all; HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST); HYPRE_Complex *dgetri_work = NULL; HYPRE_Int dgetri_lwork = -1, lapack_info; HYPRE_Int num_cols_A_offd_new; HYPRE_BigInt *col_map_offd_A_new; HYPRE_BigInt big_i; HYPRE_Int *offd2new = NULL; HYPRE_Int *marker_diag, *marker_newoffd; HYPRE_Int nnz_diag = A_diag_i[nrow_local]; HYPRE_Int nnz_offd = A_offd_i[nrow_local]; HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0; HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new; HYPRE_Complex *A_diag_a_new, *A_offd_a_new; /* heuristic */ HYPRE_Int nnz_diag_alloc = 2 * nnz_diag; HYPRE_Int nnz_offd_alloc = 2 * nnz_offd; A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *Anew; hypre_CSRMatrix *Anew_diag; hypre_CSRMatrix *Anew_offd; HYPRE_BigInt *row_starts_new, *col_starts_new; HYPRE_Real eps = 2.2e-16; /* Start with extracting the external rows */ HYPRE_BigInt *ext_offd; ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST); j = 0; for (big_i = first_row_block; big_i < first_row; big_i++) { ext_indices[j++] = big_i; } for (big_i = end_row; big_i < end_row_block; big_i++) { ext_indices[j++] = big_i; } hypre_assert(j == num_ext_rows); /* create CommPkg for external rows */ hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts, hypre_ParCSRMatrixAssumedPartition(A), num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg); hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request); A_ext = hypre_ParcsrGetExternalRowsWait(request); hypre_TFree(ext_indices, HYPRE_MEMORY_HOST); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_a = hypre_CSRMatrixData(A_ext); A_ext_nnz = A_ext_i[num_ext_rows]; ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST); /* find the offd indices in A_ext */ for (i = 0, j = 0; i < A_ext_nnz; i++) { /* global index */ HYPRE_BigInt cid = A_ext_j[i]; /* keep the offd indices */ if (cid < first_col || cid >= end_col) { ext_offd[j++] = cid; } } /* remove duplicates after sorting (TODO better ways?) 
*/ hypre_BigQsort0(ext_offd, 0, j-1); for (i = 0, k = 0; i < j; i++) { if (i == 0 || ext_offd[i] != ext_offd[i-1]) { ext_offd[k++] = ext_offd[i]; } } /* union these `k' new indices into col_map_offd_A */ col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST); if (k) { /* map offd to offd_new */ offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd, &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL); hypre_TFree(ext_offd, HYPRE_MEMORY_HOST); /* * adjust column indices in A_ext */ for (i = 0; i < A_ext_nnz; i++) { HYPRE_BigInt cid = A_ext_j[i]; if (cid < first_col || cid >= end_col) { j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new); /* searching must succeed */ hypre_assert(j >= 0 && j < num_cols_A_offd_new); /* trick: save ncol_local + j back */ A_ext_j[i] = ncol_local + j; } else { /* save local index: [0, ncol_local-1] */ A_ext_j[i] = cid - first_col; } } /* marker for diag */ marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST); for (i = 0; i < ncol_local; i++) { marker_diag[i] = -1; } /* marker for newoffd */ marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } /* outermost loop for blocks */ for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); /* 1. fill the dense block diag matrix */ for (big_i = block_start; big_i < block_end; big_i++) { /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); /* row index i: it can be local or external */ if (big_i >= first_row && big_i < end_row) { /* is a local row */ j = (HYPRE_Int)(big_i - first_row); for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++) { HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k]; } } if (num_cols_A_offd) { for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++) { HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k]; } } } } else { /* is an external row */ if (big_i < first_row) { j = (HYPRE_Int)(big_i - first_row_block); } else { j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row); } for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++) { HYPRE_BigInt cid = A_ext_j[k]; /* recover the global index */ cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k]; } } } } /* 2. 
invert the dense matrix */ hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info); hypre_assert(lapack_info == 0); if (lapack_info == 0) { HYPRE_Int query = -1; HYPRE_Real lwork_opt; /* query the optimal size of work */ hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info); hypre_assert(lapack_info == 0); if (lwork_opt > dgetri_lwork) { dgetri_lwork = lwork_opt; dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST); } hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info); hypre_assert(lapack_info == 0); } /* filter out *zeros* */ HYPRE_Real Fnorm = 0.0; for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { HYPRE_Complex t = dense[j+i*blockSize]; Fnorm += t * t; } } Fnorm = sqrt(Fnorm); for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm ) { dense[j+i*blockSize] = 0.0; } } } /* 3. premultiplication: one-pass dynamic allocation */ for (big_i = block_start; big_i < block_end; big_i++) { /* starting points of this row in j */ HYPRE_Int diag_i_start = nnz_diag_new; HYPRE_Int offd_i_start = nnz_offd_new; /* compute a new row with global index 'i' and local index 'local_i' */ HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); if (big_i < first_row || big_i >= end_row) { continue; } /* if square^2: reserve the first space in diag part for the diag entry */ if (square2) { marker_diag[local_i] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = local_i; A_diag_a_new[nnz_diag_new] = 0.0; nnz_diag_new ++; } /* combine s rows */ for (j = 0; j < s; j++) { /* row to combine: global row id */ HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; /* the multiplier */ HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { /* this row is local */ HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); HYPRE_Int ii; for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++) { HYPRE_Int col = A_diag_j[ii]; HYPRE_Complex vv = A_diag_a[ii]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++) { HYPRE_Int col = A_offd_j[ii]; /* use the mapper to map to new offd */ HYPRE_Int col_new = offd2new ? 
offd2new[col] : col; HYPRE_Complex vv = A_offd_a[ii]; if (marker_newoffd[col_new] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col_new] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col_new; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col_new]; hypre_assert(A_offd_j_new[p] == col_new); A_offd_a_new[p] += val * vv; } } } else { /* this is an external row: go to A_ext */ HYPRE_Int rid, ii; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++) { HYPRE_Int col = (HYPRE_Int)A_ext_j[ii]; HYPRE_Complex vv = A_ext_a[ii]; if (col < ncol_local) { /* in diag part */ if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } else { /* in offd part */ col -= ncol_local; if (marker_newoffd[col] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col]; hypre_assert(A_offd_j_new[p] == col); A_offd_a_new[p] += val * vv; } } } } } /* done for row local_i */ A_diag_i_new[local_i + 1] = nnz_diag_new; A_offd_i_new[local_i + 1] = nnz_offd_new; } /* for i, each row */ dense += blockSize * blockSize; } /* for each block */ /* done with all rows */ /* resize properly */ A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST); /* readjust col_map_offd_new */ for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } for (i = 0; i < nnz_offd_new; i++) { j = A_offd_j_new[i]; if (marker_newoffd[j] == -1) { marker_newoffd[j] = 1; } } for (i = 0, j = 0; i < num_cols_A_offd_new; i++) { if (marker_newoffd[i] == 1) { col_map_offd_A_new[j] = col_map_offd_A_new[i]; marker_newoffd[i] = j++; } } num_cols_A_offd_new = j; for (i = 0; i < nnz_offd_new; i++) { j = marker_newoffd[A_offd_j_new[i]]; hypre_assert(j >= 0 && j < num_cols_A_offd_new); 
A_offd_j_new[i] = j; } row_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts_new = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); /* Now we should have everything needed for the ParCSR matrix As */ Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global, row_starts_new, col_starts_new, num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; hypre_ParCSRMatrixSetNumNonzeros(Anew); hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew); //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new); /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } */ /* save diagonal blocks in A */ A->bdiag_size = blockSize; A->bdiaginv = dense_all; /* free workspace */ hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } HYPRE_Int hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts, *send_i_offset; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a = NULL, *recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* 
number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); /* fill the send array with row lengths */ for (i = 0, num_nnz_send = 0; i < num_rows_send; i++) { /* j: row index to send */ j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j]; num_nnz_send += send_i[i]; } /* send this array out: note the shift in recv_i by one (async) */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1); /* prepare data to send out. overlap with the above communication */ send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST); if (want_data) { send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST); } send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST); send_i_offset[0] = 0; hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); /* prefix sum. TODO: OMP parallelization */ for (i = 1; i <= num_rows_send; i++) { send_i_offset[i] += send_i_offset[i-1]; } hypre_assert(send_i_offset[num_rows_send] == num_nnz_send); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = 0; i <= num_sends; i++) { send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)]; } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* fill the CSR matrix: j and a */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k) #endif for (i = 0; i < num_rows_send; i++) { HYPRE_Int i1 = send_i_offset[i]; j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); /* open row j and fill ja and a to send */ for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++) { send_j[i1] = first_col + A_diag_j[k]; if (want_data) { send_a[i1] = A_diag_a[k]; } i1++; } if (num_procs > 1) { for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++) { send_j[i1] = col_map_offd_A[A_offd_j[k]]; if (want_data) { send_a[i1] = A_offd_a[k]; } i1++; } } hypre_assert(send_i_offset[i+1] == i1); } /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i-1]; } num_nnz_recv = recv_i[num_rows_recv]; recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST); if (want_data) { recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST); } recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs 
(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a); } else { comm_handle_a = NULL; } /* create A_ext */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI (A_ext) = recv_i; hypre_CSRMatrixBigJ(A_ext) = recv_j; hypre_CSRMatrixData(A_ext) = recv_a; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j); if (comm_handle_a) { HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_a, HYPRE_MEMORY_HOST); } hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(send_j, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixAdd: performs C = alpha*A + beta*B * * A and B are assumed to have the same row and column partitionings *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { /* ParCSRMatrix data */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *rownnz_diag_A = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd; /* 
diag part of B */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag); HYPRE_Int num_rownnz_diag_B = hypre_CSRMatrixNumRownnz(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); /* off-diag part of B */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd); HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd); HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int *B2C_offd; /* C data */ hypre_ParCSRMatrix *C; HYPRE_BigInt *row_starts_C; HYPRE_BigInt *col_starts_C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *C_diag_i, *C_offd_i; HYPRE_Int *rownnz_diag_C = NULL; HYPRE_Int *rownnz_offd_C = NULL; HYPRE_Int num_rownnz_diag_C; HYPRE_Int num_rownnz_offd_C; HYPRE_Int num_rows_diag_C = num_rows_diag_A; HYPRE_Int num_cols_diag_C = num_cols_diag_A; HYPRE_Int num_rows_offd_C = num_rows_offd_A; HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); HYPRE_ANNOTATE_FUNC_BEGIN; hypre_assert(num_rows_A == num_rows_B); hypre_assert(num_cols_A == num_cols_B); hypre_assert(num_rows_diag_A == num_rows_diag_B); hypre_assert(num_cols_diag_A == num_cols_diag_B); /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C); col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); /* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/ A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_union2(num_cols_offd_A, col_map_offd_A, num_cols_offd_B, col_map_offd_B, &num_cols_offd_C, col_map_offd_C, A2C_offd, B2C_offd); /* Set nonzero rows data of diag_C */ num_rownnz_diag_C = num_rows_diag_A; if ((num_rownnz_diag_A < num_rows_diag_A) && (num_rownnz_diag_B < num_rows_diag_B)) { hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A, num_rownnz_diag_B, rownnz_diag_B, &num_rownnz_diag_C, &rownnz_diag_C); } /* Set nonzero rows data of offd_C */ num_rownnz_offd_C = num_rows_offd_A; if ((num_rownnz_offd_A < num_rows_offd_A) && (num_rownnz_offd_B < num_rows_offd_B)) { hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A, num_rownnz_offd_B, rownnz_offd_B, &num_rownnz_offd_C, &rownnz_offd_C); } /* Set diag_C */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ii, num_threads; HYPRE_Int size, rest, ns, ne; HYPRE_Int *marker_diag; HYPRE_Int *marker_offd; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); 
/*----------------------------------------------------------------------- * Compute C_diag = alpha*A_diag + beta*B_diag *-----------------------------------------------------------------------*/ size = num_rownnz_diag_C/num_threads; rest = num_rownnz_diag_C - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_diag, NULL, NULL, A_diag, B_diag, num_rows_diag_C, num_rownnz_diag_C, num_cols_diag_C, rownnz_diag_C, memory_location_C, C_diag_i, &C_diag); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_diag, NULL, NULL, rownnz_diag_C, alpha, beta, A_diag, B_diag, C_diag); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Compute C_offd = alpha*A_offd + beta*B_offd *-----------------------------------------------------------------------*/ size = num_rownnz_offd_C/num_threads; rest = num_rownnz_offd_C - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, A_offd, B_offd, num_rows_offd_C, num_rownnz_offd_C, num_cols_offd_C, rownnz_offd_C, memory_location_C, C_offd_i, &C_offd); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, rownnz_offd_C, alpha, beta, A_offd, B_offd, C_offd); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); } /* end of omp parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST); hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST); /* Create ParCSRMatrix C */ row_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts_C = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_TMemcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); hypre_TMemcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), HYPRE_BigInt, 2, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); C = hypre_ParCSRMatrixCreate(comm, num_rows_A, num_cols_A, row_starts_C, col_starts_C, num_cols_offd_C, hypre_CSRMatrixNumNonzeros(C_diag), hypre_CSRMatrixNumNonzeros(C_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); hypre_ParCSRMatrixDiag(C) = C_diag; hypre_ParCSRMatrixOffd(C) = C_offd; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *C_ptr = C; HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFnorm *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Real f_diag, f_offd, local_result, result; f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A)); f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A)); local_result = f_diag * f_diag + f_offd * f_offd; hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); return sqrt(result); } 
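The Frobenius norm above follows the standard distributed pattern: each rank sums its squared local entries, one Allreduce combines the partial sums, and a single square root is taken at the end. A minimal standalone sketch of the same pattern in plain MPI (names are illustrative, not hypre API; assumes MPI is already initialized by the caller):

#include <math.h>
#include <mpi.h>

/* Sum squared local entries, reduce globally, take one sqrt at the end. */
double dist_fnorm(const double *vals, int nnz_local, MPI_Comm comm)
{
    double local = 0.0, global = 0.0;
    int i;
    for (i = 0; i < nnz_local; i++)
    {
        local += vals[i] * vals[i];   /* squares are summed, not norms */
    }
    MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_SUM, comm);
    return sqrt(global);              /* one sqrt of the global sum */
}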
/*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsInit *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, void **request_ptr) { MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i = B_ext ? hypre_CSRMatrixI(B_ext) : NULL; HYPRE_BigInt *B_ext_j = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL; HYPRE_Complex *B_ext_data = B_ext ? hypre_CSRMatrixData(B_ext) : NULL; HYPRE_Int B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0; HYPRE_Int B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0; HYPRE_Int *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST); hypre_assert(num_elmts_recv == B_ext_nrows); /* output matrix */ hypre_CSRMatrix *B_int; HYPRE_Int B_int_nrows = num_elmts_send; HYPRE_Int B_int_ncols = B_ext_ncols; HYPRE_Int *B_int_i = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST); HYPRE_BigInt *B_int_j = NULL; HYPRE_Complex *B_int_data = NULL; HYPRE_Int B_int_nnz; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; hypre_ParCSRCommPkg *comm_pkg_j; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i; HYPRE_Int num_procs; void **vrequest; hypre_MPI_Comm_size(comm, &num_procs); jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * B_ext_rownnz contains the number of elements of row j * (to be determined through send_map_elmts on the receiving end) *--------------------------------------------------------------------------*/ for (i = 0; i < B_ext_nrows; i++) { B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i]; } /*-------------------------------------------------------------------------- * initialize communication: send/recv the row nnz * (note the use of comm_pkg_A, mode 12, as in transpose matvec) *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1); jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts[0] = 0; for (i = 1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i[i] += B_int_i[i-1]; } B_int_nnz = 
B_int_i[B_int_nrows]; B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST); B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data); comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j); /* create CSR */ B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_int) = B_int_i; hypre_CSRMatrixBigJ(B_int) = B_int_j; hypre_CSRMatrixData(B_int) = B_int_data; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsWait *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_ExchangeExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractSubmatrixFC * * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC} * char job[2] = "FF", "FC", "CF" or "CC" *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts_in, const char *job, hypre_ParCSRMatrix **B_ptr, HYPRE_Real strength_thresh) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag, *B_offd; HYPRE_Real *B_maxel_row; HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j; HYPRE_Complex *B_diag_a, *B_offd_a; HYPRE_Int 
num_cols_B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int i, j, k, k1, k2; HYPRE_BigInt B_nrow_global, B_ncol_global; HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd; HYPRE_BigInt big_nf_local, total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts; HYPRE_Int nf_local, nc_local; HYPRE_Int row_set, col_set; HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col; HYPRE_Int my_id, num_procs, *sub_idx_diag, *sub_idx_offd; HYPRE_Int num_sends, *send_buf_data; /* MPI size and rank */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); row_set = job[0] == 'F' ? -1 : 1; col_set = job[1] == 'F' ? -1 : 1; A_nlocal = hypre_CSRMatrixNumRows(A_diag); /*-------------- global number of C points and local C points * assuming cpts_starts is given */ if (row_set == 1 || col_set == 1) { /* copy cpts_starts first */ HYPRE_Int len; len = 2; cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_TMemcpy(cpts_starts, cpts_starts_in, HYPRE_BigInt, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); if (my_id == (num_procs -1)) { total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]); } /*-------------- global number of F points, local F points, and F starts */ if (row_set == -1 || col_set == -1) { nf_local = 0; for (i = 0; i < A_nlocal; i++) { if (CF_marker[i] < 0) { nf_local++; } } fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); big_nf_local = (HYPRE_BigInt) nf_local; hypre_MPI_Scan(&big_nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - big_nf_local; if (my_id == num_procs - 1) { total_global_fpts = fpts_starts[1]; } hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); } if (row_set == -1 && col_set == -1) { /* FF */ B_nrow_local = nf_local; B_ncol_local = nf_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_fpts; B_row_starts = B_col_starts = fpts_starts; } else if (row_set == -1 && col_set == 1) { /* FC */ B_nrow_local = nf_local; B_ncol_local = nc_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_cpts; B_row_starts = fpts_starts; B_col_starts = cpts_starts; } else if (row_set == 1 && col_set == -1) { /* CF */ B_nrow_local = nc_local; B_ncol_local = nf_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_fpts; B_row_starts = cpts_starts; B_col_starts = fpts_starts; } else { /* CC */ B_nrow_local = nc_local; B_ncol_local = nc_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_cpts; B_row_starts = B_col_starts = cpts_starts; } /* global index of my first col */ B_first_col = B_col_starts[0]; /* sub_idx_diag: [local] mapping from F+C to F/C; -1 if not selected */ sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i == col_set) { sub_idx_diag[i] = k++; } else { sub_idx_diag[i] = -1; } } hypre_assert(k == B_ncol_local); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); k = 0; for (i = 0; i < num_sends; i++) { /* start pos of elements sent to send_proc[i] */ HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); /* loop through all elems to send_proc[i] */ for (j = si; j < ei; j++) { /* j1: local idx */ HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; if (j1 != -1) { /* adjust j1 to B global idx */ j1 += B_first_col; } send_buf_data[k++] = j1; } } hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); /* recv buffer */ sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); /* create a handle to start communication. 11: for integer */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd); /* destroy the handle to finish communication */ hypre_ParCSRCommHandleDestroy(comm_handle); for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { num_cols_B_offd ++; } } col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { col_map_offd_B[k] = sub_idx_offd[i]; sub_idx_offd[i] = k++; } } hypre_assert(k == num_cols_B_offd); /* count nnz and set ia */ B_nnz_diag = B_nnz_offd = 0; B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST); B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST); B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST); B_diag_i[0] = B_offd_i[0] = 0; for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1; if (CF_i != row_set) { continue; } k++; // Get max abs-value element of this row HYPRE_Real temp_max = 0; if (strength_thresh > 0) { for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++) { if (hypre_cabs(A_diag_a[j]) > temp_max) { temp_max = hypre_cabs(A_diag_a[j]); } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { if (hypre_cabs(A_offd_a[j]) > temp_max) { temp_max = hypre_cabs(A_offd_a[j]); } } } B_maxel_row[k-1] = temp_max; // add one for diagonal element j = A_diag_i[i]; if (sub_idx_diag[A_diag_j[j]] != -1) { B_nnz_diag++; } // Count nnzs larger than tolerance times max row element for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++) { if ( (sub_idx_diag[A_diag_j[j]] != -1) && (hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) ) { B_nnz_diag++; } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { if ( (sub_idx_offd[A_offd_j[j]] != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) ) { B_nnz_offd++; } } B_diag_i[k] = B_nnz_diag; B_offd_i[k] = B_nnz_offd; } hypre_assert(k == B_nrow_local); B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST); B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST); B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST); B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST); for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i != row_set) { continue; } HYPRE_Real maxel = B_maxel_row[k]; k++; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]]; if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) ) { B_diag_j[k1] = j1; B_diag_a[k1] = A_diag_a[j]; k1++; } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]]; if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel))) { hypre_assert(j1 >= 0 && j1 < num_cols_B_offd); B_offd_j[k2] = j1; B_offd_a[k2] = A_offd_a[j]; k2++; } } } hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd); /* ready to create B = A(rowset, colset) */ B = hypre_ParCSRMatrixCreate(comm, B_nrow_global, B_ncol_global, B_row_starts, B_col_starts, num_cols_B_offd, B_nnz_diag, B_nnz_offd); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_diag) = B_diag_a; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_offd) = B_offd_a; hypre_CSRMatrixI(B_offd) = B_offd_i; hypre_CSRMatrixJ(B_offd) = B_offd_j; hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; hypre_ParCSRMatrixSetNumNonzeros(B); hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B); hypre_MatvecCommPkgCreate(B); *B_ptr = B; hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST); hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; }
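The row-combination loops above rely on a marker array so that duplicate column indices merge in O(1): marker[col] records where col was placed in the output row, and comparing it against the row's start offset (diag_i_start / offd_i_start) spares re-initializing the marker for every row. A minimal standalone sketch of this idiom (all names illustrative, not hypre API; the marker here is reset once up front):

#include <stdio.h>

#define NCOLS 6

/* Accumulate scale*row into the output row under construction.
 * marker[col] >= row_start means col already has a slot in this row. */
static void add_scaled_row(const int *cols, const double *vals, int len,
                           double scale, int *marker, int row_start,
                           int *jout, double *aout, int *nnz)
{
    int k;
    for (k = 0; k < len; k++)
    {
        int col = cols[k];
        if (marker[col] < row_start)   /* first time this row sees col */
        {
            marker[col] = *nnz;
            jout[*nnz] = col;
            aout[*nnz] = scale * vals[k];
            (*nnz)++;
        }
        else                           /* duplicate column: merge */
        {
            aout[marker[col]] += scale * vals[k];
        }
    }
}

int main(void)
{
    int marker[NCOLS], jout[NCOLS], nnz = 0, k;
    double aout[NCOLS];
    int c1[] = {0, 2, 4}; double v1[] = {1.0, 1.0, 1.0};
    int c2[] = {2, 3};    double v2[] = {10.0, 10.0};

    for (k = 0; k < NCOLS; k++) { marker[k] = -1; }
    add_scaled_row(c1, v1, 3, 2.0, marker, 0, jout, aout, &nnz);
    add_scaled_row(c2, v2, 2, 0.5, marker, 0, jout, aout, &nnz);
    for (k = 0; k < nnz; k++) { printf("col %d -> %g\n", jout[k], aout[k]); }
    return 0;   /* prints: 0 -> 2, 2 -> 7, 4 -> 2, 3 -> 5 */
}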
pi-v12.c
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum=0.0, pi=0.0; #if _DEBUG_ double start,end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0/(double) num_steps; #if _DEBUG_ start= omp_get_wtime(); #else Extrae_event (PROGRAM, PI_COMPUTATION); #endif omp_set_num_threads(3); /* do computation -- using just two threads */ // WARNING : correct code #pragma omp parallel private(i,x) num_threads(2) reduction(+:sum) { #if _DEBUG_ int id = omp_get_thread_num(); #endif #pragma omp for schedule(static) for (i=0; i < num_steps; i++) { x = (i+0.5)*step; sum += 4.0/(1.0+x*x); #if _DEBUG_ printf("thread id:%d it:%d\n",id,i); #endif } } #pragma omp parallel pi = step * sum; #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end-start); #else Extrae_event (PROGRAM, END); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum = 0.0, pi = 0.0; #if _DEBUG_ double start, end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0 / (double)num_steps; #if _DEBUG_ start = omp_get_wtime(); #else Extrae_event(PROGRAM, PI_COMPUTATION); #endif omp_set_num_threads(3); /* do computation -- using just two threads */ //WARNING:correct code #if _DEBUG_ int id = omp_get_thread_num(); #endif for (i = 0; i < num_steps; i++) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); #if _DEBUG_ printf("thread id:%d it:%d\n", id, i); #endif } pi = step * sum; #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end - start); #else Extrae_event(PROGRAM, END); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
/* * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x) * between 0 and 1. * * parallel version using OpenMP */ #include <stdio.h> #include <stdlib.h> #include <omp.h> /* OpenMP */ #if _DEBUG_ #define _DEBUG_ 1 #else #define _DEBUG_ 0 #include "extrae_user_events.h" #define PROGRAM 1000 #define PI_COMPUTATION 1 #define END 0 #endif int main(int argc, char *argv[]) { double x, sum = 0.0, pi = 0.0; #if _DEBUG_ double start, end; #endif int i; const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n"; if (argc < 2) { fprintf(stderr, Usage); exit(1); } int num_steps = atoi(argv[1]); double step = 1.0 / (double)num_steps; #if _DEBUG_ start = omp_get_wtime(); #else Extrae_event(PROGRAM, PI_COMPUTATION); #endif omp_set_num_threads(3); /* do computation -- using just two threads */ //WARNING:correct code #pragma omp parallel private(i,x) num_threads(2) reduction(+:sum) { #if _DEBUG_ int id = omp_get_thread_num(); #endif #pragma omp for schedule(static) for (i = 0; i < num_steps; i++) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); #if _DEBUG_ printf("thread id:%d it:%d\n", id, i); #endif } } #pragma omp parallel pi = step * sum; #if _DEBUG_ end = omp_get_wtime(); printf("Wall clock execution time = %.9f seconds\n", end - start); #else Extrae_event(PROGRAM, END); #endif /* print results */ printf("Value of pi = %12.10f\n", pi); return EXIT_SUCCESS; }
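The three pi-v12.c variants above show the parallel-region-plus-worksharing form with a sum reduction. For reference, the same midpoint-rule integration written with the combined parallel-for directive — a minimal sketch, not part of the dataset:

/* pi via the midpoint rule: sum f(x_i) at interval midpoints, scale by step.
 * reduction(+:sum) gives each thread a private accumulator that is combined
 * at the end, avoiding a data race on sum. Compile with -fopenmp. */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    const long num_steps = 100000000L;
    const double step = 1.0 / (double)num_steps;
    double sum = 0.0;

    #pragma omp parallel for reduction(+:sum)
    for (long i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;      /* midpoint of interval i */
        sum += 4.0 / (1.0 + x * x);
    }
    printf("pi ~= %.10f\n", step * sum);
    return 0;
}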
pr27573.c
/* PR middle-end/27573 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp -fprofile-generate" } */ extern int puts (const char *); int main (void) { int i, j = 8; #pragma omp parallel { puts ("foo"); for (i = 1; i < j - 1; i++) ; } return 0; } /* { dg-final { cleanup-coverage-files } } */
/* PR middle-end/27573 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp -fprofile-generate" } */ extern int puts(const char *); int main(void) { int i, j = 8; puts("foo"); for (i = 1; i < j - 1; i++) ; return 0; } /* { dg-final { cleanup-coverage-files } } */
/* PR middle-end/27573 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp -fprofile-generate" } */ extern int puts(const char *); int main(void) { int i, j = 8; #pragma omp parallel { puts("foo"); for (i = 1; i < j - 1; i++) ; } return 0; } /* { dg-final { cleanup-coverage-files } } */
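pr27573.c is a GCC regression test for profile instrumentation inside a parallel region (-fopenmp with -fprofile-generate), not a threading-correctness example: i is shared across threads, so the empty loop races. A race-free variant simply makes the induction variable loop-local — a minimal sketch:

#include <stdio.h>

int main(void)
{
    int j = 8;
    #pragma omp parallel
    {
        puts("foo");
        for (int i = 1; i < j - 1; i++)   /* i is loop-local, hence private */
            ;
    }
    return 0;
}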
veccopy-ompt-target-tracing.c
#include <stdio.h> #include <assert.h> #include <omp.h> #include "callbacks.h" static int start_trace(); static int flush_trace(); static int stop_trace(); int main() { int N = 100000; int a[N]; int b[N]; int i; for (i=0; i<N; i++) a[i]=0; for (i=0; i<N; i++) b[i]=i; start_trace(); #pragma omp target parallel for { for (int j = 0; j< N; j++) a[j]=b[j]; } flush_trace(); stop_trace(); start_trace(); #pragma omp target teams distribute parallel for { for (int j = 0; j< N; j++) a[j]=b[j]; } stop_trace(); int rc = 0; for (i=0; i<N; i++) if (a[i] != b[i] ) { rc++; printf ("Wrong value: a[%d]=%d\n", i, a[i]); } if (!rc) printf("Success\n"); return rc; }
#include <stdio.h> #include <assert.h> #include <omp.h> #include "callbacks.h" static int start_trace(); static int flush_trace(); static int stop_trace(); int main() { int N = 100000; int a[N]; int b[N]; int i; for (i = 0; i < N; i++) a[i] = 0; for (i = 0; i < N; i++) b[i] = i; start_trace(); for (int j = 0; j < N; j++) a[j] = b[j]; flush_trace(); stop_trace(); start_trace(); for (int j = 0; j < N; j++) a[j] = b[j]; stop_trace(); int rc = 0; for (i = 0; i < N; i++) if (a[i] != b[i]) { rc++; printf("Wrong value: a[%d]=%d\n", i, a[i]); } if (!rc) printf("Success\n"); return rc; }
#include <stdio.h> #include <assert.h> #include <omp.h> #include "callbacks.h" static int start_trace(); static int flush_trace(); static int stop_trace(); int main() { int N = 100000; int a[N]; int b[N]; int i; for (i = 0; i < N; i++) a[i] = 0; for (i = 0; i < N; i++) b[i] = i; start_trace(); #pragma omp target parallel for { for (int j = 0; j < N; j++) a[j] = b[j]; } flush_trace(); stop_trace(); start_trace(); #pragma omp target teams distribute parallel for { for (int j = 0; j < N; j++) a[j] = b[j]; } stop_trace(); int rc = 0; for (i = 0; i < N; i++) if (a[i] != b[i]) { rc++; printf("Wrong value: a[%d]=%d\n", i, a[i]); } if (!rc) printf("Success\n"); return rc; }
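Note that in veccopy-ompt-target-tracing.c the combined target constructs are followed by a brace block rather than binding directly to a for loop, which is non-conforming OpenMP (a combined "parallel for" must be followed by the loop itself). A minimal conforming sketch of the offloaded copy, with the trace start/stop calls omitted:

#include <stdio.h>

int main(void)
{
    enum { N = 1000 };
    int a[N], b[N], i, rc = 0;
    for (i = 0; i < N; i++) { a[i] = 0; b[i] = i; }

    /* the combined construct binds directly to the loop; the fixed-size
       arrays are mapped explicitly here for clarity */
    #pragma omp target teams distribute parallel for map(to: b) map(tofrom: a)
    for (int j = 0; j < N; j++)
        a[j] = b[j];

    for (i = 0; i < N; i++)
        rc += (a[i] != b[i]);
    printf(rc ? "Failed\n" : "Success\n");
    return rc;
}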
randomized_rm.h
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & University of Surrey for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_RANDOMIZED_RM_H_ #define CORE_RANDOMIZED_RM_H_ #include <algorithm> #include "core/resource_manager.h" #ifdef LINUX #include <parallel/algorithm> #endif // LINUX namespace bdm { template <typename TBaseRm> class RandomizedRm : public TBaseRm { public: explicit RandomizedRm(TRootIOCtor* r) {} RandomizedRm(); virtual ~RandomizedRm(); void EndOfIteration() override; protected: BDM_CLASS_DEF_NV(RandomizedRm, 1); }; // ----------------------------------------------------------------------------- template <typename TBaseRm> RandomizedRm<TBaseRm>::RandomizedRm() {} // ----------------------------------------------------------------------------- template <typename TBaseRm> RandomizedRm<TBaseRm>::~RandomizedRm() {} struct Ubrng { using result_type = uint32_t; Random* random; Ubrng(Random* random) : random(random) {} static constexpr result_type min() { return 0; } static constexpr result_type max() { return std::numeric_limits<result_type>::max(); } result_type operator()() { return random->Integer(std::numeric_limits<result_type>::max()); } }; // ----------------------------------------------------------------------------- template <typename TBaseRm> void RandomizedRm<TBaseRm>::EndOfIteration() { TBaseRm::EndOfIteration(); // shuffle #pragma omp parallel for schedule(static, 1) for (uint64_t n = 0; n < this->agents_.size(); ++n) { #ifdef LINUX __gnu_parallel::random_shuffle(this->agents_[n].begin(), this->agents_[n].end()); #else auto* random = Simulation::GetActive()->GetRandom(); std::shuffle(this->agents_[n].begin(), this->agents_[n].end(), Ubrng(random)); #endif // LINUX } // update uid_ah_map_ auto update_agent_map = L2F([this](Agent* a, AgentHandle ah) { this->uid_ah_map_.Insert(a->GetUid(), ah); }); TBaseRm::ForEachAgentParallel(update_agent_map); } } // namespace bdm #endif // CORE_RANDOMIZED_RM_H_
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & University of Surrey for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_RANDOMIZED_RM_H_ #define CORE_RANDOMIZED_RM_H_ #include <algorithm> #include "core/resource_manager.h" #ifdef LINUX #include <parallel/algorithm> #endif // LINUX namespace bdm { template <typename TBaseRm> class RandomizedRm : public TBaseRm { public: explicit RandomizedRm(TRootIOCtor* r) {} RandomizedRm(); virtual ~RandomizedRm(); void EndOfIteration() override; protected: BDM_CLASS_DEF_NV(RandomizedRm, 1); }; // ----------------------------------------------------------------------------- template <typename TBaseRm> RandomizedRm<TBaseRm>::RandomizedRm() {} // ----------------------------------------------------------------------------- template <typename TBaseRm> RandomizedRm<TBaseRm>::~RandomizedRm() {} struct Ubrng { using result_type = uint32_t; Random* random; Ubrng(Random* random) : random(random) {} static constexpr result_type min() { return 0; } static constexpr result_type max() { return std::numeric_limits<result_type>::max(); } result_type operator()() { return random->Integer(std::numeric_limits<result_type>::max()); } }; // ----------------------------------------------------------------------------- template <typename TBaseRm> void RandomizedRm<TBaseRm>::EndOfIteration() { TBaseRm::EndOfIteration(); // shuffle for (uint64_t n = 0; n < this->agents_.size(); ++n) { #ifdef LINUX __gnu_parallel::random_shuffle(this->agents_[n].begin(), this->agents_[n].end()); #else auto* random = Simulation::GetActive()->GetRandom(); std::shuffle(this->agents_[n].begin(), this->agents_[n].end(), Ubrng(random)); #endif // LINUX } // update uid_ah_map_ auto update_agent_map = L2F([this](Agent* a, AgentHandle ah) { this->uid_ah_map_.Insert(a->GetUid(), ah); }); TBaseRm::ForEachAgentParallel(update_agent_map); } } // namespace bdm #endif // CORE_RANDOMIZED_RM_H_
// ----------------------------------------------------------------------------- // // Copyright (C) 2021 CERN & University of Surrey for the benefit of the // BioDynaMo collaboration. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_RANDOMIZED_RM_H_ #define CORE_RANDOMIZED_RM_H_ #include <algorithm> #include "core/resource_manager.h" #ifdef LINUX #include <parallel/algorithm> #endif // LINUX namespace bdm { template <typename TBaseRm> class RandomizedRm : public TBaseRm { public: explicit RandomizedRm(TRootIOCtor* r) {} RandomizedRm(); virtual ~RandomizedRm(); void EndOfIteration() override; protected: BDM_CLASS_DEF_NV(RandomizedRm, 1); }; // ----------------------------------------------------------------------------- template <typename TBaseRm> RandomizedRm<TBaseRm>::RandomizedRm() {} // ----------------------------------------------------------------------------- template <typename TBaseRm> RandomizedRm<TBaseRm>::~RandomizedRm() {} struct Ubrng { using result_type = uint32_t; Random* random; Ubrng(Random* random) : random(random) {} static constexpr result_type min() { return 0; } static constexpr result_type max() { return std::numeric_limits<result_type>::max(); } result_type operator()() { return random->Integer(std::numeric_limits<result_type>::max()); } }; // ----------------------------------------------------------------------------- template <typename TBaseRm> void RandomizedRm<TBaseRm>::EndOfIteration() { TBaseRm::EndOfIteration(); // shuffle #pragma omp parallel for schedule(static, 1) for (uint64_t n = 0; n < this->agents_.size(); ++n) { #ifdef LINUX __gnu_parallel::random_shuffle(this->agents_[n].begin(), this->agents_[n].end()); #else auto* random = Simulation::GetActive()->GetRandom(); std::shuffle(this->agents_[n].begin(), this->agents_[n].end(), Ubrng(random)); #endif // LINUX } // update uid_ah_map_ auto update_agent_map = L2F([this](Agent* a, AgentHandle ah) { this->uid_ah_map_.Insert(a->GetUid(), ah); }); TBaseRm::ForEachAgentParallel(update_agent_map); } } // namespace bdm #endif // CORE_RANDOMIZED_RM_H_
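RandomizedRm decouples the shuffle from the random source by wrapping BioDynaMo's Random in Ubrng, the UniformRandomBitGenerator interface std::shuffle expects. The same decoupling in plain C is a Fisher-Yates shuffle parameterized by a caller-supplied generator — a minimal sketch with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* generator contract: return a value in [0, bound) using opaque state */
typedef unsigned int (*rng_fn)(void *state, unsigned int bound);

static unsigned int libc_rng(void *state, unsigned int bound)
{
    (void)state;
    return (unsigned int)(rand() % bound);  /* biased for large bound; fine for a sketch */
}

static void shuffle_ints(int *a, int n, rng_fn next, void *state)
{
    for (int i = n - 1; i > 0; i--) {       /* Fisher-Yates: swap a[i] with a[0..i] */
        int j = (int)next(state, (unsigned int)(i + 1));
        int t = a[i]; a[i] = a[j]; a[j] = t;
    }
}

int main(void)
{
    int v[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    srand(42);
    shuffle_ints(v, 8, libc_rng, NULL);
    for (int i = 0; i < 8; i++) printf("%d ", v[i]);
    printf("\n");
    return 0;
}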
saxpy.c
/** * @file saxpy.c * * @brief saxpy performs the \c axpy computation in single-precision on both * host and accelerator. The performance (in MFLOPS) on host and accelerator is * compared and the numerical results are also verified for consistency. * * The \c axpy computation is defined as: * * y := a * x + y * * where: * * - a is a scalar. * - x and y are vectors each with n elements. * * Please note that in this version only <em>one GPU thread</em> is used. * * Offload to GPU: * * gcc -fopenmp -foffload=nvptx-none saxpy.c -o saxpy * clang -fopenmp -O3 -fopenmp-targets=nvptx64-nvidia-cuda saxpy.c -o saxpy SOURCE: https://github.com/pc2/OMP-Offloading/blob/master/simplifiedCode/05_saxpy/saxpy.c */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #define TWO27 (1 << 27) int main(int argc, char *argv[]) { int i, n = TWO27, iret = 0; float a = 101.0f / TWO02, *x, *y, *z; struct timespec rt[2]; double wt; // walltime /* * 0. prepare x, y, and z * * y := a * x + y (on host) * z := a * x + z (on accel) */ if (NULL == (x = (float *) malloc(sizeof(*x) * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } if (NULL == (y = (float *) malloc(sizeof(*y) * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (z = (float *) malloc(sizeof(*z) * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { free(x); free(y); free(z); exit(EXIT_FAILURE); } for (i = 0; i < n; i++) { x[i] = rand() % TWO04 / (float) TWO02; y[i] = z[i] = rand() % TWO08 / (float) TWO04; } /* * 1. saxpy on host */ clock_gettime(CLOCK_REALTIME, rt + 0); #pragma omp parallel \ default(none) shared(n, a, x, y) private(i) { #pragma omp for simd schedule(simd:static) for (i = 0; i < n; i++) { y[i] = a * x[i] + y[i]; } } clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt)); /* * 2. saxpy on accel */ clock_gettime(CLOCK_REALTIME, rt + 0); #pragma omp target device(0) \ map(to:n, a, x[0:n]) map(tofrom:z[0:n]) private(i) { for (i = 0; i < n; i++) { z[i] = a * x[i] + z[i]; } } clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt)); /* * 3. verify numerical consistency */ for (i = 0; i < n; i++) { iret = *(int *) (y + i) ^ *(int *) (z + i); assert(iret == 0); } return 0; }
/** * @file saxpy.c * * @brief saxpy performs the \c axpy computation in single-precision on both * host and accelerator. The performance (in MFLOPS) on host and accelerator is * compared and the numerical results are also verified for consistency. * * The \c axpy computation is defined as: * * y := a * x + y * * where: * * - a is a scalar. * - x and y are vectors each with n elements. * * Please note that in this version only <em>one GPU thread</em> is used. * * Offload to GPU: * * gcc -fopenmp -foffload=nvptx-none saxpy.c -o saxpy * clang -fopenmp -O3 -fopenmp-targets=nvptx64-nvidia-cuda saxpy.c -o saxpy SOURCE: https://github.com/pc2/OMP-Offloading/blob/master/simplifiedCode/05_saxpy/saxpy.c */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #define TWO27 (1 << 27) int main(int argc, char *argv[]) { int i, n = TWO27, iret = 0; float a = 101.0f / TWO02, *x, *y, *z; struct timespec rt[2]; double wt; //walltime /* * 0. prepare x, y, and z * * y := a * x + y (on host) z := a * x + z (on accel) */ if (NULL == (x = (float *)malloc(sizeof(*x) * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } if (NULL == (y = (float *)malloc(sizeof(*y) * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (z = (float *)malloc(sizeof(*z) * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { free(x); free(y); free(z); exit(EXIT_FAILURE); } for (i = 0; i < n; i++) { x[i] = rand() % TWO04 / (float)TWO02; y[i] = z[i] = rand() % TWO08 / (float)TWO04; } /* * 1. saxpy on host */ clock_gettime(CLOCK_REALTIME, rt + 0); { for (i = 0; i < n; i++) { y[i] = a * x[i] + y[i]; } } clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt)); /* * 2. saxpy on accel */ clock_gettime(CLOCK_REALTIME, rt + 0); { for (i = 0; i < n; i++) { z[i] = a * x[i] + z[i]; } } clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt)); /* * 3. verify numerical consistency */ for (i = 0; i < n; i++) { iret = *(int *)(y + i) ^ *(int *)(z + i); assert(iret == 0); } return 0; }
/**
 * @file saxpy.c
 *
 * @brief saxpy performs the \c axpy computation in single-precision on both
 * host and accelerator. The performance (in MFLOPS) on host and accelerator is
 * compared and the numerical results are also verified for consistency.
 *
 * The \c axpy computation is defined as:
 *
 * y := a * x + y
 *
 * where:
 *
 * - a is a scalar.
 * - x and y are vectors each with n elements.
 *
 * Please note that in this version only <em>one GPU thread</em> is used.
 *
 * Offload to GPU:
 *
 * gcc -fopenmp -foffload=nvptx-none saxpy.c -o saxpy
 * clang -fopenmp -O3 -fopenmp-targets=nvptx64-nvidia-cuda saxpy.c -o saxpy
 *
 * SOURCE: https://github.com/pc2/OMP-Offloading/blob/master/simplifiedCode/05_saxpy/saxpy.c
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#define TWO27 (1 << 27)

int main(int argc, char *argv[])
{
  int i, n = TWO27, iret = 0;
  float a = 101.0f / TWO02, *x, *y, *z;
  struct timespec rt[2];
  double wt; // walltime
  /*
   * 0. prepare x, y, and z
   *
   * y := a * x + y (on host)
   * z := a * x + z (on accel)
   */
  if (NULL == (x = (float *)malloc(sizeof(*x) * n))) {
    printf("error: memory allocation for 'x'\n");
    iret = -1;
  }
  if (NULL == (y = (float *)malloc(sizeof(*y) * n))) {
    printf("error: memory allocation for 'y'\n");
    iret = -1;
  }
  if (NULL == (z = (float *)malloc(sizeof(*z) * n))) {
    printf("error: memory allocation for 'z'\n");
    iret = -1;
  }
  if (0 != iret) {
    free(x);
    free(y);
    free(z);
    exit(EXIT_FAILURE);
  }
  for (i = 0; i < n; i++) {
    x[i] = rand() % TWO04 / (float)TWO02;
    y[i] = z[i] = rand() % TWO08 / (float)TWO04;
  }
  /*
   * 1. saxpy on host
   */
  clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp parallel \
    default(none) shared(n, a, x, y) private(i)
  {
#pragma omp for simd schedule(simd:static)
    for (i = 0; i < n; i++) {
      y[i] = a * x[i] + y[i];
    }
  }
  clock_gettime(CLOCK_REALTIME, rt + 1);
  wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
  printf("saxpy on host : %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt));
  /*
   * 2. saxpy on accel
   */
  clock_gettime(CLOCK_REALTIME, rt + 0);
#pragma omp target device(0) \
    map(to:n, a, x[0:n]) map(tofrom:z[0:n]) private(i)
  {
    for (i = 0; i < n; i++) {
      z[i] = a * x[i] + z[i];
    }
  }
  clock_gettime(CLOCK_REALTIME, rt + 1);
  wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
  printf("saxpy on accel: %9.3f sec %9.1f MFLOPS\n", wt, 2.0 * n / (1.0e6 * wt));
  /*
   * 3. verify numerical consistency
   */
  for (i = 0; i < n; i++) {
    iret = *(int *)(y + i) ^ *(int *)(z + i);
    assert(iret == 0);
  }
  return 0;
}
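As the file comment says, the bare #pragma omp target region above offloads the loop to the device but leaves it on a single GPU thread. A hedged sketch of the multi-threaded form using the OpenMP 4.5 combined construct; the function name saxpy_offload_mt is ours, not part of the benchmark:

#include <omp.h>

/* Same z := a * x + z computation, but iterations are spread over GPU
 * teams and their threads instead of running on one device thread. */
void saxpy_offload_mt(int n, float a, const float *x, float *z)
{
#pragma omp target teams distribute parallel for \
    map(to: x[0:n]) map(tofrom: z[0:n]) firstprivate(a)
  for (int i = 0; i < n; i++) {
    z[i] = a * x[i] + z[i];
  }
}

The map clauses mirror the ones in the file; the measured speedup over the single-thread version depends on the compiler's offloading support.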
lu.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU

  This benchmark is an OpenMP C version of the NPB LU code.

  The OpenMP C versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:
           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:
           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Authors: S. Weeratunga
           V. Venkatakrishnan
           E. Barszcz
           M. Yarrow

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"

/* global variables */
#include "applu.h"

#if defined(_OPENMP)
/* for thread synchronization */
#endif /* _OPENMP */

/* function declarations */
#include <omp.h>
static void blts(int nx, int ny, int nz, int k, double omega,
                 double v[64][65][65][5], double ldz[64][64][5][5],
                 double ldy[64][64][5][5], double ldx[64][64][5][5],
                 double d[64][64][5][5], int ist, int iend, int jst, int jend,
                 int nx0, int ny0);
static void buts(int nx, int ny, int nz, int k, double omega,
                 double v[64][65][65][5], double tv[64][64][5],
                 double d[64][64][5][5], double udx[64][64][5][5],
                 double udy[64][64][5][5], double udz[64][64][5][5],
                 int ist, int iend, int jst, int jend, int nx0, int ny0);
static void domain();
static void erhs();
static void error();
static void exact(int i, int j, int k, double u000ijk[5]);
static void jacld(int k);
static void jacu(int k);
static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst,
                   int jend, double v[64][65][65][5], double sum[5]);
static void pintgr();
static void read_input();
static void rhs();
static void setbv();
static void setcoeff();
static void setiv();
static void ssor();
static void verify(double xcr[5], double xce[5], double xci, char *class,
                   boolean *verified);

/*--------------------------------------------------------------------
      program applu
--------------------------------------------------------------------*/
int main(int argc, char **argv)
{
/*--------------------------------------------------------------------
c
c   driver for the performance evaluation of the solver for
c   five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
  char class;
  boolean verified;
  double mflops;
  int nthreads = 1;

/*--------------------------------------------------------------------
c   read input data
--------------------------------------------------------------------*/
  read_input();
/*--------------------------------------------------------------------
c   set up domain sizes
--------------------------------------------------------------------*/
  domain();
/*--------------------------------------------------------------------
c   set up coefficients
--------------------------------------------------------------------*/
  setcoeff();
/*--------------------------------------------------------------------
c   set the boundary values for dependent variables
--------------------------------------------------------------------*/
  setbv();
/*--------------------------------------------------------------------
c   set the initial values for dependent variables
--------------------------------------------------------------------*/
  setiv();
/*--------------------------------------------------------------------
c   compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
  erhs();
  {
#if defined(_OPENMP)
#endif /* _OPENMP */
  }
/*--------------------------------------------------------------------
c   perform the SSOR iterations
--------------------------------------------------------------------*/
  ssor();
/*--------------------------------------------------------------------
c   compute the solution error
--------------------------------------------------------------------*/
  error();
/*--------------------------------------------------------------------
c   compute the surface integral
--------------------------------------------------------------------*/
  pintgr();
/*--------------------------------------------------------------------
c   verification test
--------------------------------------------------------------------*/
  verify(rsdnm, errnm, frc, &class, &verified);
  mflops = ((double) itmax)
           * (1984.77 * ((double) nx0) * ((double) ny0) * ((double) nz0)
              - 10923.3 * (((double) (nx0 + ny0 + nz0)) / 3.0
                           * (((double) (nx0 + ny0 + nz0)) / 3.0))
              + 27770.9 * ((double) (nx0 + ny0 + nz0)) / 3.0
              - 144010.0)
           / (maxtime * 1000000.0);
  c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime,
                  mflops, " floating point", verified, "3.0 structured",
                  "01 Dec 2019", "(none)", "(none)", "-lm", "(none)",
                  "(none)", "(none)", "(none)");
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void blts(int nx, int ny, int nz, int k, double omega,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only.  Only needed in v.
--------------------------------------------------------------------*/ double v[64][65][65][5],double ldz[64][64][5][5],double ldy[64][64][5][5],double ldx[64][64][5][5],double d[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; #pragma omp parallel for private (i,j,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (k,omega) for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]); } } } for (i = ist; i <= iend; i += 1) { #if defined(_OPENMP) #endif /* _OPENMP */ for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (omega) for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]); } /*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; 
tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } #if defined(_OPENMP) #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx,int ny,int nz,int k,double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[64][65][65][5],double tv[64][64][5],double d[64][64][5][5],double udx[64][64][5][5],double udy[64][64][5][5],double udz[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; #pragma omp parallel for private (i,j,m) for (i = iend; i >= ist; i += -1) { #pragma omp parallel for private (j,m) for (j = jend; j >= jst; j += -1) { #pragma omp parallel for private (m) firstprivate (k,omega) for (m = 0; m <= 4; m += 1) { tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]); } } } for (i = iend; i >= ist; i += -1) { #if defined(_OPENMP) #endif /* _OPENMP */ for (j = jend; j >= jst; j += -1) { #pragma omp parallel for private (m) firstprivate (omega) for (m = 0; m <= 4; m += 1) { tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] 
= tmat[3][4] - tmp * tmat[1][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ tv[i][j][4] = tv[i][j][4] / tmat[4][4]; tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4]; tv[i][j][3] = tv[i][j][3] / tmat[3][3]; tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4]; tv[i][j][2] = tv[i][j][2] / tmat[2][2]; tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4]; tv[i][j][1] = tv[i][j][1] / tmat[1][1]; tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4]; tv[i][j][0] = tv[i][j][0] / tmat[0][0]; v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0]; v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1]; v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2]; v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3]; v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4]; } #if defined(_OPENMP) #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void domain() { /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ nx = nx0; ny = ny0; nz = nz0; /*-------------------------------------------------------------------- c check the sub-domain size --------------------------------------------------------------------*/ if (nx < 4 || ny < 4 || nz < 4) { printf(" SUBDOMAIN SIZE IS TOO SMALL - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n TO 4 THEY ARE CURRENTLY%3d%3d%3d\n",nx,ny,nz); exit(1); } if (nx > 64 || ny > 64 || nz > 64) { printf(" SUBDOMAIN SIZE IS TOO LARGE - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. 
THEY ARE\n CURRENTLY%4d%4d%4d\n",nx,ny,nz); exit(1); } /*-------------------------------------------------------------------- c set up the start and end in i and j extents for all processors --------------------------------------------------------------------*/ ist = 1; iend = nx - 2; jst = 1; jend = ny - 2; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void erhs() { { /*-------------------------------------------------------------------- c c compute the right hand side based on exact solution c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double dsspm; double xi; double eta; double zeta; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; dsspm = dssp; #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= nx - 1; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= ny - 1; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= nz - 1; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = 0.0; } } } } #pragma omp parallel for private (iglob,jglob,xi,eta,zeta,i,j,k,m) firstprivate (nx0) for (i = 0; i <= nx - 1; i += 1) { iglob = i; xi = ((double )iglob) / (nx0 - 1); #pragma omp parallel for private (jglob,eta,zeta,j,k,m) firstprivate (ny0) for (j = 0; j <= ny - 1; j += 1) { jglob = j; eta = ((double )jglob) / (ny0 - 1); #pragma omp parallel for private (zeta,k,m) for (k = 0; k <= nz - 1; k += 1) { zeta = ((double )k) / (nz - 1); #pragma omp parallel for private (m) firstprivate (xi,eta,zeta) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2) for (i = L1; i <= L2; i += 1) { #pragma omp parallel for private (q,u21,j,k) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u21,k) for (k = 1; k <= nz - 1 - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][1]; u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][2] = rsd[i][j][k][2] * u21; flux[i][j][k][3] = rsd[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } #pragma omp parallel for private 
(u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (i,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (m) firstprivate (tx2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i) for (i = ist; i <= L2; i += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21i = tmp * rsd[i][j][k][1]; u31i = tmp * rsd[i][j][k][2]; u41i = tmp * rsd[i][j][k][3]; u51i = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i - 1][j][k][0]; u21im1 = tmp * rsd[i - 1][j][k][1]; u31im1 = tmp * rsd[i - 1][j][k][2]; u41im1 = tmp * rsd[i - 1][j][k][3]; u51im1 = tmp * rsd[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5) for (i = ist; i <= iend; i += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (- 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; #pragma omp parallel for private (i,m) firstprivate (iend1) for (i = ist1; i <= iend1; i += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 
4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u31,j,k) for (j = L1; j <= L2; j += 1) { #pragma omp parallel for private (q,u31,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = rsd[i][j][k][2]; u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u31; flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][3] = rsd[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,i,j,k,m) firstprivate (nz) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (ty2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j) for (j = jst; j <= L2; j += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21j = tmp * rsd[i][j][k][1]; u31j = tmp * rsd[i][j][k][2]; u41j = tmp * rsd[i][j][k][3]; u51j = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j - 1][k][0]; u21jm1 = tmp * rsd[i][j - 1][k][1]; u31jm1 = tmp * rsd[i][j - 1][k][2]; u41jm1 = tmp * rsd[i][j - 1][k][3]; u51jm1 = tmp * rsd[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5) for (j = jst; j <= jend; j += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation 
--------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (- 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; #pragma omp parallel for private (j,m) firstprivate (jend1) for (j = jst1; j <= jend1; j += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,i,j,k,m) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u41,k) for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tz2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k) for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j][k - 1][0]; u21km1 = tmp * rsd[i][j][k - 1][1]; u31km1 = tmp * rsd[i][j][k - 1][2]; u41km1 = tmp * rsd[i][j][k - 1][3]; u51km1 = tmp * rsd[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } #pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5) for (k = 1; k <= nz - 2; k += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); frct[i][j][k][1] = frct[i][j][k][1] + 
tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } #pragma omp parallel for private (k,m) for (k = 3; k <= nz - 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error() { /*-------------------------------------------------------------------- c c compute the solution error c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; double tmp; double u000ijk[5]; #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { errnm[m] = 0.0; } for (i = ist; i <= iend; i += 1) { iglob = i; for (j = jst; j <= jend; j += 1) { jglob = j; for (k = 1; k <= nz - 2; k += 1) { exact(iglob,jglob,k,u000ijk); #pragma omp parallel for private (tmp,m) for (m = 0; m <= 4; m += 1) { tmp = u000ijk[m] - u[i][j][k][m]; errnm[m] = errnm[m] + tmp * tmp; } } } } for (m = 0; m <= 4; m += 1) { errnm[m] = sqrt(errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2))); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact(int i,int j,int k,double u000ijk[5]) { /*-------------------------------------------------------------------- c c compute the exact solution at (i,j,k) c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int 
m; double xi; double eta; double zeta; xi = ((double )i) / (nx0 - 1); eta = ((double )j) / (ny0 - 1); zeta = ((double )k) / (nz - 1); #pragma omp parallel for private (m) firstprivate (xi,eta,zeta) for (m = 0; m <= 4; m += 1) { u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacld(int k) { /*-------------------------------------------------------------------- c compute the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; double r43; double c1345; double c34; double tmp1; double tmp2; double tmp3; r43 = 4.0 / 3.0; c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00; c34 = 1.00e-01 * 1.00e+00; #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt) for (j = jst; j <= jend; j += 1) { /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) 
* tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k - 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tz1 * dz1; a[i][j][0][1] = 0.0; a[i][j][0][2] = 0.0; a[i][j][0][3] = -dt * tz2; a[i][j][0][4] = 0.0; a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]); a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; a[i][j][1][2] = 0.0; a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1); a[i][j][1][4] = 0.0; a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]); a[i][j][2][1] = 0.0; a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1); a[i][j][2][4] = 0.0; a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]); a[i][j][3][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * tmp1)); a[i][j][3][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * tmp1)); a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; a[i][j][3][4] = -dt * tz2 * 0.40e+00; a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]); a[i][j][4][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1]; a[i][j][4][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2]; a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3]; a[i][j][4][4] = -dt * tz2 * 
(1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j - 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = -dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]); b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]); b[i][j][2][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * tmp1)); b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][3] * tmp1)); b[i][j][2][4] = -dt * ty2 * 0.40e+00; b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1); b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j - 1][k][1] * u[i][j - 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j - 1][k][2] * u[i][j - 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j - 1][k][3] * u[i][j - 1][k][3]) - c1345 * tmp2 * u[i][j - 1][k][4]); b[i][j][4][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1]; b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2]; b[i][j][4][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3]; b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i - 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tx1 * dx1; c[i][j][0][1] = -dt * tx2; c[i][j][0][2] = 0.0; c[i][j][0][3] = 0.0; c[i][j][0][4] = 0.0; c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]); c[i][j][1][1] = 
-dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; c[i][j][1][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * tmp1)); c[i][j][1][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * tmp1)); c[i][j][1][4] = -dt * tx2 * 0.40e+00; c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]); c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1); c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; c[i][j][2][3] = 0.0; c[i][j][2][4] = 0.0; c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]); c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1); c[i][j][3][2] = 0.0; c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; c[i][j][3][4] = 0.0; c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i - 1][j][k][1] * u[i - 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][2] * u[i - 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][3] * u[i - 1][j][k][3]) - c1345 * tmp2 * u[i - 1][j][k][4]); c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1]; c[i][j][4][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2]; c[i][j][4][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3]; c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacu(int k) { /*-------------------------------------------------------------------- c compute the upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; double r43; double c1345; double c34; double tmp1; double tmp2; double tmp3; r43 = 4.0 / 3.0; c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00; c34 = 1.00e-01 * 1.00e+00; #if defined(_OPENMP) #else #pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt) for (j = jst; j <= jend; j += 1) { #endif /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 
0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i + 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tx1 * dx1; a[i][j][0][1] = dt * tx2; a[i][j][0][2] = 0.0; a[i][j][0][3] = 0.0; a[i][j][0][4] = 0.0; a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]); a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * tmp1)); a[i][j][1][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * tmp1)); a[i][j][1][4] = dt * tx2 * 0.40e+00; a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]); a[i][j][2][1] = dt * tx2 * (u[i + 
1][j][k][2] * tmp1); a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]); a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i + 1][j][k][1] * u[i + 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][2] * u[i + 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][3] * u[i + 1][j][k][3]) - c1345 * tmp2 * u[i + 1][j][k][4]); a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1]; a[i][j][4][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2]; a[i][j][4][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3]; a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j + 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]); b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]); b[i][j][2][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * tmp1)); b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][3] * tmp1)); b[i][j][2][4] = dt * ty2 * 0.40e+00; b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1); b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j + 1][k][1] * 
u[i][j + 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j + 1][k][2] * u[i][j + 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j + 1][k][3] * u[i][j + 1][k][3]) - c1345 * tmp2 * u[i][j + 1][k][4]); b[i][j][4][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1]; b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2]; b[i][j][4][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3]; b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k + 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]); c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]); c[i][j][3][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * tmp1)); c[i][j][3][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * tmp1)); c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * 0.40e+00; c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k + 1][1] * u[i][j][k + 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k + 1][2] * u[i][j][k + 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k + 1][3] * u[i][j][k + 1][3]) - c1345 * tmp2 * u[i][j][k + 1][4]); c[i][j][4][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1]; c[i][j][4][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2]; c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3]; c[i][j][4][4] = dt * 
tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void l2norm(int nx0,int ny0,int nz0,int ist,int iend,int jst,int jend, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. --------------------------------------------------------------------*/ double v[64][65][65][5],double sum[5]) { { /*-------------------------------------------------------------------- c to compute the l2-norm of vector v. --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; double sum0 = 0.0; double sum1 = 0.0; double sum2 = 0.0; double sum3 = 0.0; double sum4 = 0.0; #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { sum[m] = 0.0; } #pragma omp parallel for private (i,j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4) for (k = 1; k <= nz0 - 2; k += 1) { sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0]; sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1]; sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2]; sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3]; sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4]; } } } { sum[0] += sum0; sum[1] += sum1; sum[2] += sum2; sum[3] += sum3; sum[4] += sum4; } for (m = 0; m <= 4; m += 1) { sum[m] = sqrt(sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2))); } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void pintgr() { /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int ibeg; int ifin; int ifin1; int jbeg; int jfin; int jfin1; int iglob; int iglob1; int iglob2; int jglob; int jglob1; int jglob2; /* phi1(0:isiz2+1,0:isiz3+1) */ double phi1[66][66]; /* phi2(0:isiz2+1,0:isiz3+1) */ double phi2[66][66]; double frc1; double frc2; double frc3; /*-------------------------------------------------------------------- c set up the sub-domains for integeration in each processor --------------------------------------------------------------------*/ ibeg = nx; ifin = 0; iglob1 = - 1; iglob2 = nx - 1; if (iglob1 >= ii1 && iglob2 < ii2 + nx) ibeg = 0; if (iglob1 >= ii1 - nx && iglob2 <= ii2) ifin = nx; if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1; if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2; jbeg = ny; jfin = - 1; jglob1 = 0; jglob2 = ny - 1; if (jglob1 >= ji1 && jglob2 < ji2 + ny) jbeg = 0; if (jglob1 > ji1 - ny && jglob2 <= ji2) jfin = ny; if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1; if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2; ifin1 = ifin; jfin1 = jfin; if (ifin1 == ii2) ifin1 = ifin - 1; if (jfin1 == ji2) jfin1 = jfin - 1; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ #pragma omp parallel for private 
(i,k) for (i = 0; i <= 65; i += 1) { #pragma omp parallel for private (k) for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } #pragma omp parallel for private (k,iglob,jglob,i,j) for (i = ibeg; i <= ifin; i += 1) { iglob = i; #pragma omp parallel for private (k,jglob,j) for (j = jbeg; j <= jfin; j += 1) { jglob = j; k = ki1; phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]); k = ki2; phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]); } } frc1 = 0.0; #pragma omp parallel for private (i,j) reduction (+:frc1) for (i = ibeg; i <= ifin1; i += 1) { #pragma omp parallel for private (j) reduction (+:frc1) for (j = jbeg; j <= jfin1; j += 1) { frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]); } } frc1 = dxi * deta * frc1; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ #pragma omp parallel for private (i,k) for (i = 0; i <= 65; i += 1) { #pragma omp parallel for private (k) for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } jglob = jbeg; if (jglob == ji1) { #pragma omp parallel for private (iglob,i,k) for (i = ibeg; i <= ifin; i += 1) { iglob = i; #pragma omp parallel for private (k) for (k = ki1; k <= ki2; k += 1) { phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (u[i][jbeg][k][1] * u[i][jbeg][k][1] + u[i][jbeg][k][2] * u[i][jbeg][k][2] + u[i][jbeg][k][3] * u[i][jbeg][k][3]) / u[i][jbeg][k][0]); } } } jglob = jfin; if (jglob == ji2) { #pragma omp parallel for private (iglob,i,k) for (i = ibeg; i <= ifin; i += 1) { iglob = i; #pragma omp parallel for private (k) for (k = ki1; k <= ki2; k += 1) { phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (u[i][jfin][k][1] * u[i][jfin][k][1] + u[i][jfin][k][2] * u[i][jfin][k][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]); } } } frc2 = 0.0; #pragma omp parallel for private (i,k) reduction (+:frc2) firstprivate (ifin1) for (i = ibeg; i <= ifin1; i += 1) { #pragma omp parallel for private (k) reduction (+:frc2) for (k = ki1; k <= ki2 - 1; k += 1) { frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]); } } frc2 = dxi * dzeta * frc2; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ #pragma omp parallel for private (i,k) for (i = 0; i <= 65; i += 1) { #pragma omp parallel for private (k) for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } iglob = ibeg; if (iglob == ii1) { #pragma omp parallel for private (jglob,j,k) for (j = jbeg; j <= jfin; j += 1) { jglob = j; #pragma omp parallel for private (k) firstprivate (ibeg) for (k = ki1; k <= ki2; k += 1) { phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (u[ibeg][j][k][1] * u[ibeg][j][k][1] + u[ibeg][j][k][2] * u[ibeg][j][k][2] + u[ibeg][j][k][3] * u[ibeg][j][k][3]) / u[ibeg][j][k][0]); } } } iglob = ifin; if (iglob == ii2) { #pragma omp parallel for private (jglob,j,k) firstprivate (jfin) for (j = jbeg; j <= jfin; j += 1) { jglob = j; #pragma omp parallel for private (k) firstprivate (ifin) for (k = ki1; k <= ki2; k += 
1) {
                phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (u[ifin][j][k][1] * u[ifin][j][k][1] + u[ifin][j][k][2] * u[ifin][j][k][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]);
            }
        }
    }

    frc3 = 0.0;
#pragma omp parallel for private (j,k) reduction (+:frc3) firstprivate (jfin1,ki1,ki2)
    for (j = jbeg; j <= jfin1; j += 1) {
#pragma omp parallel for private (k) reduction (+:frc3)
        for (k = ki1; k <= ki2 - 1; k += 1) {
            frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]);
        }
    }
    frc3 = deta * dzeta * frc3;
    frc = 0.25 * (frc1 + frc2 + frc3);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void read_input()
{
    FILE *fp;

/*--------------------------------------------------------------------
c  if input file does not exist, it uses defaults
c    ipr = 1 for detailed progress output
c    inorm = how often the norm is printed (once every inorm iterations)
c    itmax = number of pseudo time steps
c    dt = time step
c    omega 1 over-relaxation factor for SSOR
c    tolrsd = steady state residual tolerance levels
c    nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - LU Benchmark\n\n");

    fp = fopen("inputlu.data", "r");
    if (fp != ((void *)0)) {
        printf(" Reading from input file inputlu.data\n");
        /* each parameter sits on its own line of the input file; the
           fgetc loops below skip the label and blank lines around it */
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        fscanf(fp, "%d%d", &ipr, &inorm);
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        fscanf(fp, "%d", &itmax);
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        fscanf(fp, "%lf", &dt);
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        fscanf(fp, "%lf", &omega);
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        fscanf(fp, "%lf%lf%lf%lf%lf", &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        while (fgetc(fp) != '\n');
        fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
        while (fgetc(fp) != '\n');
        fclose(fp);
    } else {
        ipr = 1;
        inorm = 250;
        itmax = 250;
        dt = 2.0;
        omega = 1.2;
        tolrsd[0] = 1.0e-8;
        tolrsd[1] = 1.0e-8;
        tolrsd[2] = 1.0e-8;
        tolrsd[3] = 1.0e-8;
        tolrsd[4] = 1.0e-8;
        nx0 = 64;
        ny0 = 64;
        nz0 = 64;
    }

/*--------------------------------------------------------------------
c  check problem size
--------------------------------------------------------------------*/
    if (nx0 < 4 || ny0 < 4 || nz0 < 4) {
        printf(" PROBLEM SIZE IS TOO SMALL - \n SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
        exit(1);
    }
    if (nx0 > 64 || ny0 > 64 || nz0 > 64) {
        printf(" PROBLEM SIZE IS TOO LARGE - \n NX, NY AND NZ SHOULD BE EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
        exit(1);
    }
    printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
    printf(" Iterations: %3d\n", itmax);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void rhs()
{
    {
/*--------------------------------------------------------------------
c   compute the right hand sides
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
        int i;
        int j;
        int k;
        int m;
        int L1;
        int L2;
        int ist1;
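/*--------------------------------------------------------------------
c   overview of rhs(): rsd is first reset to -frct, then each of the
c   xi-, eta- and zeta-direction passes below (1) evaluates the
c   convective fluxes, (2) subtracts their central difference,
c   (3) adds the viscous terms, and (4) adds fourth-order artificial
c   dissipation, whose interior stencil is, schematically,
c
c       rsd[i] -= dssp * (u[i-2] - 4.0*u[i-1] + 6.0*u[i] - 4.0*u[i+1] + u[i+2]);
c
c   with one-sided variants at the two points nearest each boundary.
--------------------------------------------------------------------*/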
int iend1; int jst1; int jend1; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= nx - 1; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= ny - 1; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= nz - 1; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2) for (i = L1; i <= L2; i += 1) { #pragma omp parallel for private (q,u21,j,k) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u21,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][1]; u21 = u[i][j][k][1] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][2] = u[i][j][k][2] * u21; flux[i][j][k][3] = u[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (i,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (m) firstprivate (tx2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } L2 = nx - 1; #pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2) for (i = ist; i <= L2; i += 1) { tmp = 1.0 / u[i][j][k][0]; u21i = tmp * u[i][j][k][1]; u31i = tmp * u[i][j][k][2]; u41i = tmp * u[i][j][k][3]; u51i = tmp * u[i][j][k][4]; tmp = 1.0 / u[i - 1][j][k][0]; u21im1 = tmp * u[i - 1][j][k][1]; u31im1 = tmp * u[i - 1][j][k][2]; u41im1 = tmp * u[i - 1][j][k][3]; u51im1 = tmp * u[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5) for (i = ist; i <= iend; i += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - 
flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (- 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; #pragma omp parallel for private (i,m) firstprivate (iend1) for (i = ist1; i <= iend1; i += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u31,j,k) for (j = L1; j <= L2; j += 1) { #pragma omp parallel for private (q,u31,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][2]; u31 = u[i][j][k][2] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u31; flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][3] = u[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,i,j,k,m) firstprivate (nz) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (ty2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } L2 = ny - 1; #pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2) for (j = jst; j <= L2; j += 1) { tmp = 1.0 / u[i][j][k][0]; u21j = tmp * u[i][j][k][1]; u31j = tmp * u[i][j][k][2]; u41j = tmp * u[i][j][k][3]; u51j = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j - 1][k][0]; u21jm1 = tmp * u[i][j - 1][k][1]; u31jm1 = tmp * u[i][j - 1][k][2]; u41jm1 = tmp * u[i][j - 1][k][3]; u51jm1 = tmp * 
u[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5) for (j = jst; j <= jend; j += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (- 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; #pragma omp parallel for private (j,m) firstprivate (jend1) for (j = jst1; j <= jend1; j += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,i,j,k,m) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u41,k) for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = u[i][j][k][3]; u41 = u[i][j][k][3] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u41; flux[i][j][k][2] = u[i][j][k][2] * u41; flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 
* q) * u41; } #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tz2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } #pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k) for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / u[i][j][k][0]; u21k = tmp * u[i][j][k][1]; u31k = tmp * u[i][j][k][2]; u41k = tmp * u[i][j][k][3]; u51k = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j][k - 1][0]; u21km1 = tmp * u[i][j][k - 1][1]; u31km1 = tmp * u[i][j][k - 1][2]; u41km1 = tmp * u[i][j][k - 1][3]; u51km1 = tmp * u[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } #pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5) for (k = 1; k <= nz - 2; k += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (- 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } #pragma omp parallel for private (k,m) for (k = 3; k <= nz - 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setbv() { { /*-------------------------------------------------------------------- c set the boundary values of dependent variables 
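c
c  (the boundary data come from the prescribed exact solution:
c  exact() is evaluated on all six faces of the box - k = 0 and
c  k = nz-1, then j = 0 and j = ny-1, then i = 0 and i = nx-1 -
c  so u starts out consistent with the solution that the solution
c  error is later measured against.)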
--------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int iglob; int jglob; /*-------------------------------------------------------------------- c set the dependent variable values along the top and bottom faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (j = 0; j <= ny - 1; j += 1) { jglob = j; exact(iglob,jglob,0,&u[i][j][0][0]); exact(iglob,jglob,nz - 1,&u[i][j][nz - 1][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along north and south faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob,0,k,&u[i][0][k][0]); } } for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob,ny0 - 1,k,&u[i][ny - 1][k][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along east and west faces --------------------------------------------------------------------*/ for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(0,jglob,k,&u[0][j][k][0]); } } for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(nx0 - 1,jglob,k,&u[nx - 1][j][k][0]); } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setcoeff() { /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ dxi = 1.0 / (nx0 - 1); deta = 1.0 / (ny0 - 1); dzeta = 1.0 / (nz0 - 1); tx1 = 1.0 / (dxi * dxi); tx2 = 1.0 / (2.0 * dxi); tx3 = 1.0 / dxi; ty1 = 1.0 / (deta * deta); ty2 = 1.0 / (2.0 * deta); ty3 = 1.0 / deta; tz1 = 1.0 / (dzeta * dzeta); tz2 = 1.0 / (2.0 * dzeta); tz3 = 1.0 / dzeta; ii1 = 1; ii2 = nx0 - 2; ji1 = 1; ji2 = ny0 - 3; ki1 = 2; ki2 = nz0 - 2; /*-------------------------------------------------------------------- c diffusion coefficients --------------------------------------------------------------------*/ dx1 = 0.75; dx2 = dx1; dx3 = dx1; dx4 = dx1; dx5 = dx1; dy1 = 0.75; dy2 = dy1; dy3 = dy1; dy4 = dy1; dy5 = dy1; dz1 = 1.00; dz2 = dz1; dz3 = dz1; dz4 = dz1; dz5 = dz1; /*-------------------------------------------------------------------- c fourth difference dissipation --------------------------------------------------------------------*/ dssp = ((dx1 > ((dy1 > dz1?dy1 : dz1))?dx1 : ((dy1 > dz1?dy1 : dz1)))) / 4.0; /*-------------------------------------------------------------------- c coefficients of the exact solution to the first pde --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 5.0e-01; ce[0][7] = 2.0e-02; ce[0][8] = 1.0e-02; ce[0][9] = 3.0e-02; ce[0][10] = 5.0e-01; ce[0][11] = 4.0e-01; ce[0][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the second pde --------------------------------------------------------------------*/ ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 
1.0;
    ce[1][5] = 2.0;
    ce[1][6] = 3.0;
    ce[1][7] = 1.0e-02;
    ce[1][8] = 3.0e-02;
    ce[1][9] = 2.0e-02;
    ce[1][10] = 4.0e-01;
    ce[1][11] = 3.0e-01;
    ce[1][12] = 5.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the third pde
--------------------------------------------------------------------*/
    ce[2][0] = 2.0;
    ce[2][1] = 2.0;
    ce[2][2] = 0.0;
    ce[2][3] = 0.0;
    ce[2][4] = 0.0;
    ce[2][5] = 2.0;
    ce[2][6] = 3.0;
    ce[2][7] = 4.0e-02;
    ce[2][8] = 3.0e-02;
    ce[2][9] = 5.0e-02;
    ce[2][10] = 3.0e-01;
    ce[2][11] = 5.0e-01;
    ce[2][12] = 4.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the fourth pde
--------------------------------------------------------------------*/
    ce[3][0] = 2.0;
    ce[3][1] = 2.0;
    ce[3][2] = 0.0;
    ce[3][3] = 0.0;
    ce[3][4] = 0.0;
    ce[3][5] = 2.0;
    ce[3][6] = 3.0;
    ce[3][7] = 3.0e-02;
    ce[3][8] = 5.0e-02;
    ce[3][9] = 4.0e-02;
    ce[3][10] = 2.0e-01;
    ce[3][11] = 1.0e-01;
    ce[3][12] = 3.0e-01;

/*--------------------------------------------------------------------
c   coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
    ce[4][0] = 5.0;
    ce[4][1] = 4.0;
    ce[4][2] = 3.0;
    ce[4][3] = 2.0;
    ce[4][4] = 1.0e-01;
    ce[4][5] = 4.0e-01;
    ce[4][6] = 3.0e-01;
    ce[4][7] = 5.0e-02;
    ce[4][8] = 4.0e-02;
    ce[4][9] = 3.0e-02;
    ce[4][10] = 1.0e-01;
    ce[4][11] = 3.0e-01;
    ce[4][12] = 2.0e-01;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void setiv()
{
    {
/*--------------------------------------------------------------------
c
c   set the initial values of independent variables based on tri-linear
c   interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
        int i;
        int j;
        int k;
        int m;
        int iglob;
        int jglob;
        double xi;
        double eta;
        double zeta;
        double pxi;
        double peta;
        double pzeta;
        double ue_1jk[5];
        double ue_nx0jk[5];
        double ue_i1k[5];
        double ue_iny0k[5];
        double ue_ij1[5];
        double ue_ijnz[5];

        for (j = 0; j <= ny - 1; j += 1) {
            jglob = j;
            for (k = 1; k <= nz - 1 - 1; k += 1) {
                zeta = ((double)k) / (nz - 1);
                if (jglob != 0 && jglob != ny0 - 1) {
                    eta = ((double)jglob) / (ny0 - 1);
                    for (i = 0; i <= nx - 1; i += 1) {
                        iglob = i;
                        if (iglob != 0 && iglob != nx0 - 1) {
                            xi = ((double)iglob) / (nx0 - 1);
                            exact(0, jglob, k, ue_1jk);
                            exact(nx0 - 1, jglob, k, ue_nx0jk);
                            exact(iglob, 0, k, ue_i1k);
                            exact(iglob, ny0 - 1, k, ue_iny0k);
                            exact(iglob, jglob, 0, ue_ij1);
                            exact(iglob, jglob, nz - 1, ue_ijnz);
#pragma omp parallel for private (pxi,peta,pzeta,m) firstprivate (xi,eta,zeta)
                            for (m = 0; m <= 4; m += 1) {
                                pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m];
                                peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m];
                                pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m];
                                u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta;
                            }
                        }
                    }
                }
            }
        }
    }
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void ssor()
{
/*--------------------------------------------------------------------
c   to perform pseudo-time stepping SSOR iterations
c   for five nonlinear pde's.
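c
c   each step scales the residual by dt, sweeps the lower triangle
c   (jacld + blts, k ascending) and the upper triangle (jacu + buts,
c   k descending), and then relaxes the solution by
c
c       u[i][j][k][m] += rsd[i][j][k][m] / (omega * (2.0 - omega));
c
c   which is why tmp below is precomputed as 1.0 / (omega * (2.0 - omega)).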
--------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int istep; double tmp; double delunm[5]; double tv[64][64][5]; /*-------------------------------------------------------------------- c begin pseudo-time stepping iterations --------------------------------------------------------------------*/ tmp = 1.0 / (omega * (2.0 - omega)); /*-------------------------------------------------------------------- c initialize a,b,c,d to zero (guarantees that page tables have been c formed, if applicable on given architecture, before timestepping). --------------------------------------------------------------------*/ { #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= 63; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= 63; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { a[i][j][k][m] = 0.0; b[i][j][k][m] = 0.0; c[i][j][k][m] = 0.0; d[i][j][k][m] = 0.0; } } } } } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration residuals --------------------------------------------------------------------*/ l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,rsdnm); timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c the timestep loop --------------------------------------------------------------------*/ for (istep = 1; istep <= itmax; istep += 1) { if (istep % 20 == 0 || istep == itmax || istep == 1) { printf(" Time step %4d\n",istep); } { /*-------------------------------------------------------------------- c perform SSOR iteration --------------------------------------------------------------------*/ #pragma omp parallel for private (i,j,k,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (dt) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } for (k = 1; k <= nz - 2; k += 1) { /*-------------------------------------------------------------------- c form the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacld(k); /*-------------------------------------------------------------------- c perform the lower triangular solution --------------------------------------------------------------------*/ blts(nx,ny,nz,k,omega,rsd,a,b,c,d,ist,iend,jst,jend,nx0,ny0); } for (k = nz - 2; k >= 1; k += -1) { /*-------------------------------------------------------------------- c form the strictly upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacu(k); /*-------------------------------------------------------------------- c perform the upper triangular solution --------------------------------------------------------------------*/ buts(nx,ny,nz,k,omega,rsd,tv,d,a,b,c,ist,iend,jst,jend,nx0,ny0); } /*-------------------------------------------------------------------- c 
update the variables --------------------------------------------------------------------*/ #pragma omp parallel for private (i,j,k,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tmp) for (m = 0; m <= 4; m += 1) { u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* end parallel */ } /*-------------------------------------------------------------------- c compute the max-norms of newton iteration corrections --------------------------------------------------------------------*/ if (istep % inorm == 0) { l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,delunm); } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the max-norms of newton iteration residuals --------------------------------------------------------------------*/ if (istep % inorm == 0 || istep == itmax) { l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,rsdnm); } /*-------------------------------------------------------------------- c check the newton-iteration residuals against the tolerance levels --------------------------------------------------------------------*/ if (rsdnm[0] < tolrsd[0] && rsdnm[1] < tolrsd[1] && rsdnm[2] < tolrsd[2] && rsdnm[3] < tolrsd[3] && rsdnm[4] < tolrsd[4]) { exit(1); } } timer_stop(1); maxtime = timer_read(1); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void verify(double xcr[5],double xce[5],double xci,char *class,boolean *verified) { /*-------------------------------------------------------------------- c verification routine --------------------------------------------------------------------*/ double xcrref[5]; double xceref[5]; double xciref; double xcrdif[5]; double xcedif[5]; double xcidif; double epsilon; double dtref; int m; /*-------------------------------------------------------------------- c tolerance level --------------------------------------------------------------------*/ epsilon = 1.0e-08; *class = 'U'; *verified = 1; #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { xcrref[m] = 1.0; xceref[m] = 1.0; } xciref = 1.0; if (nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) { *class = 'S'; dtref = 5.0e-1; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xcrref[0] = 1.6196343210976702e-02; xcrref[1] = 2.1976745164821318e-03; xcrref[2] = 1.5179927653399185e-03; xcrref[3] = 1.5029584435994323e-03; xcrref[4] = 3.4264073155896461e-02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xceref[0] = 6.4223319957960924e-04; xceref[1] = 8.4144342047347926e-05; xceref[2] = 5.8588269616485186e-05; xceref[3] = 5.8474222595157350e-05; xceref[4] = 1.3103347914111294e-03; /*-------------------------------------------------------------------- c Reference 
value of surface integral, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xciref = 7.8418928865937083; } else if (nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) { /* SPEC95fp size */ *class = 'W'; dtref = 1.5e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (33x33x33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xcrref[0] = 0.1236511638192e+02; xcrref[1] = 0.1317228477799e+01; xcrref[2] = 0.2550120713095e+01; xcrref[3] = 0.2326187750252e+01; xcrref[4] = 0.2826799444189e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (33X33X33) grid, --------------------------------------------------------------------*/ xceref[0] = 0.4867877144216; xceref[1] = 0.5064652880982e-01; xceref[2] = 0.9281818101960e-01; xceref[3] = 0.8570126542733e-01; xceref[4] = 0.1084277417792e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (33X33X33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xciref = 0.1161399311023e+02; } else if (nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) { *class = 'A'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 7.7902107606689367e+02; xcrref[1] = 6.3402765259692870e+01; xcrref[2] = 1.9499249727292479e+02; xcrref[3] = 1.7845301160418537e+02; xcrref[4] = 1.8384760349464247e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.9964085685471943e+01; xceref[1] = 2.8194576365003349; xceref[2] = 7.3473412698774742; xceref[3] = 6.7139225687777051; xceref[4] = 7.0715315688392578e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 2.6030925604886277e+01; } else if (nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) { *class = 'B'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 3.5532672969982736e+03; xcrref[1] = 2.6214750795310692e+02; xcrref[2] = 8.8333721850952190e+02; xcrref[3] = 7.7812774739425265e+02; xcrref[4] = 7.3087969592545314e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (102X102X102) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 1.1401176380212709e+02; xceref[1] = 8.1098963655421574; xceref[2] = 2.8480597317698308e+01; xceref[3] = 
2.5905394567832939e+01; xceref[4] = 2.6054907504857413e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 4.7887162703308227e+01; } else if (nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) { *class = 'C'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 1.03766980323537846e+04; xcrref[1] = 8.92212458801008552e+02; xcrref[2] = 2.56238814582660871e+03; xcrref[3] = 2.19194343857831427e+03; xcrref[4] = 1.78078057261061185e+04; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (162X162X162) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.15986399716949279e+02; xceref[1] = 1.55789559239863600e+01; xceref[2] = 5.41318863077207766e+01; xceref[3] = 4.82262643154045421e+01; xceref[4] = 4.55902910043250358e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 6.66404553572181300e+01; } else { *verified = 0; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 64X64X64 or 102X102X102 or 162X162X162 --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]); xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]); } xcidif = fabs((xci - xciref) / xciref); /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. 
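c
c  each table below lists the computed value, the reference value and
c  their relative difference; any difference larger than
c  epsilon = 1.0e-08 downgrades the run to "Verification failed".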
--------------------------------------------------------------------*/
    if ((*class) != 'U') {
        printf("\n Verification being performed for class %1c\n", (*class));
        printf(" Accuracy setting for epsilon = %20.13e\n", epsilon);
        if (fabs(dt - dtref) > epsilon) {
            *verified = 0;
            *class = 'U';
            printf(" DT does not match the reference value of %15.8e\n", dtref);
        }
    } else {
        printf(" Unknown class\n");
    }

    if ((*class) != 'U') {
        printf(" Comparison of RMS-norms of residual\n");
    } else {
        printf(" RMS-norms of residual\n");
    }
    for (m = 0; m <= 4; m += 1) {
        if ((*class) == 'U') {
            printf(" %2d %20.13e\n", m, xcr[m]);
        } else if (xcrdif[m] > epsilon) {
            *verified = 0;
            printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]);
        } else {
            printf(" %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]);
        }
    }

    if ((*class) != 'U') {
        printf(" Comparison of RMS-norms of solution error\n");
    } else {
        printf(" RMS-norms of solution error\n");
    }
    for (m = 0; m <= 4; m += 1) {
        if ((*class) == 'U') {
            printf(" %2d %20.13e\n", m, xce[m]);
        } else if (xcedif[m] > epsilon) {
            *verified = 0;
            printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]);
        } else {
            printf(" %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]);
        }
    }

    if ((*class) != 'U') {
        printf(" Comparison of surface integral\n");
    } else {
        printf(" Surface integral\n");
    }
    if ((*class) == 'U') {
        printf(" %20.13e\n", xci);
    } else if (xcidif > epsilon) {
        *verified = 0;
        printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
    } else {
        printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif);
    }

    if ((*class) == 'U') {
        printf(" No reference values provided\n");
        printf(" No verification performed\n");
    } else if (*verified) {
        printf(" Verification Successful\n");
    } else {
        printf(" Verification failed\n");
    }
}
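/*--------------------------------------------------------------------
c   aside (a minimal sketch, not part of the benchmark): the l2norm()
c   routine above avoids reducing into the array sum[] by expanding it
c   into the scalars sum0..sum4 and folding the results back at the
c   end.  The same pattern in isolation, for a single component:
c
c       double sum0 = 0.0;
c       #pragma omp parallel for reduction(+:sum0)
c       for (int i = 0; i < n; i++)
c           sum0 += v[i] * v[i];
c       norm = sqrt(sum0 / n);
c
c   (OpenMP 4.5 and later can also reduce over array sections
c   directly, e.g. reduction(+:sum[:5]), but the scalar expansion
c   works with the older OpenMP level this translation targets.)
--------------------------------------------------------------------*/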
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU

  This benchmark is an OpenMP C version of the NPB LU code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial
  Fortran versions in "NPB 2.3-serial" developed by NAS.
  3.0 translation is performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.  This software is
  provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:
          http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:
          http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
  Authors: S. Weeratunga
           V. Venkatakrishnan
           E. Barszcz
           M. Yarrow

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov
--------------------------------------------------------------------*/

#include "../common/npb-C.h"

/* global variables */
#include "applu.h"

/* function declarations */
#include <omp.h>
static void blts(int nx, int ny, int nz, int k, double omega, double v[64][65][65][5], double ldz[64][64][5][5], double ldy[64][64][5][5], double ldx[64][64][5][5], double d[64][64][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0);
static void buts(int nx, int ny, int nz, int k, double omega, double v[64][65][65][5], double tv[64][64][5], double d[64][64][5][5], double udx[64][64][5][5], double udy[64][64][5][5], double udz[64][64][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0);
static void domain();
static void erhs();
static void error();
static void exact(int i, int j, int k, double u000ijk[5]);
static void jacld(int k);
static void jacu(int k);
static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[64][65][65][5], double sum[5]);
static void pintgr();
static void read_input();
static void rhs();
static void setbv();
static void setcoeff();
static void setiv();
static void ssor();
static void verify(double xcr[5], double xce[5], double xci, char *class, boolean *verified);

/*--------------------------------------------------------------------
      program applu
--------------------------------------------------------------------*/

int main(int argc, char **argv)
{
/*--------------------------------------------------------------------
c
c   driver for the performance evaluation of the solver for
c   five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/
    char class;
    boolean verified;
    double mflops;
    int nthreads = 1;

/*--------------------------------------------------------------------
c   read input data
--------------------------------------------------------------------*/
    read_input();

/*--------------------------------------------------------------------
c   set up domain sizes
--------------------------------------------------------------------*/
    domain();

/*--------------------------------------------------------------------
c   set up coefficients
--------------------------------------------------------------------*/
    setcoeff();

/*--------------------------------------------------------------------
c   set the boundary values for dependent variables
--------------------------------------------------------------------*/
    setbv();

/*--------------------------------------------------------------------
c   set the initial values for dependent variables
--------------------------------------------------------------------*/
    setiv();

/*--------------------------------------------------------------------
c   compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
    erhs();
    {
    }

/*--------------------------------------------------------------------
c   perform the SSOR iterations
--------------------------------------------------------------------*/
    ssor();

/*--------------------------------------------------------------------
c   compute the solution error
--------------------------------------------------------------------*/
    error();

/*--------------------------------------------------------------------
c   compute the surface integral
--------------------------------------------------------------------*/
    pintgr();

/*--------------------------------------------------------------------
c   verification test
--------------------------------------------------------------------*/
    verify(rsdnm, errnm, frc, &class, &verified);
    mflops = ((double)itmax) * (1984.77 * ((double)nx0) * ((double)ny0) * ((double)nz0)
        - 10923.3 * (((double)(nx0 + ny0 + nz0)) / 3.0 * (((double)(nx0 + ny0 + nz0)) / 3.0))
        + 27770.9 * ((double)(nx0 + ny0 + nz0)) / 3.0
        - 144010.0) / (maxtime * 1000000.0);
    c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops,
                    " floating point", verified, "3.0 structured", "01 Dec 2019",
                    "(none)", "(none)", "-lm", "(none)", "(none)", "(none)", "(none)");
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void blts(int nx, int ny, int nz, int k, double omega,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only. Only needed in v.
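c
c   (with the 64^3 class A grid, padding the two middle extents to 65
c   gives array strides that are not powers of two, which avoids the
c   pathological cache-set conflicts that power-of-two strides tend
c   to cause.)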
--------------------------------------------------------------------*/ double v[64][65][65][5],double ldz[64][64][5][5],double ldy[64][64][5][5],double ldx[64][64][5][5],double d[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; for (i = ist; i <= iend; i += 1) { for (j = jst; j <= jend; j += 1) { for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]); } } } for (i = ist; i <= iend; i += 1) { for (j = jst; j <= jend; j += 1) { for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]); } /*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - 
tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx,int ny,int nz,int k,double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. --------------------------------------------------------------------*/ double v[64][65][65][5],double tv[64][64][5],double d[64][64][5][5],double udx[64][64][5][5],double udy[64][64][5][5],double udz[64][64][5][5],int ist,int iend,int jst,int jend,int nx0,int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; for (i = iend; i >= ist; i += -1) { for (j = jend; j >= jst; j += -1) { for (m = 0; m <= 4; m += 1) { tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]); } } } for (i = iend; i >= ist; i += -1) { for (j = jend; j >= jst; j += -1) { for (m = 0; m <= 4; m += 1) { tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { tmat[m][0] = 
d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ tv[i][j][4] = tv[i][j][4] / tmat[4][4]; tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4]; tv[i][j][3] = tv[i][j][3] / tmat[3][3]; tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4]; tv[i][j][2] = tv[i][j][2] / tmat[2][2]; tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4]; tv[i][j][1] = tv[i][j][1] / tmat[1][1]; tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4]; tv[i][j][0] = tv[i][j][0] / tmat[0][0]; v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0]; v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1]; v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2]; v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3]; v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4]; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void domain() { /*-------------------------------------------------------------------- c local variables 
--------------------------------------------------------------------*/ nx = nx0; ny = ny0; nz = nz0; /*-------------------------------------------------------------------- c check the sub-domain size --------------------------------------------------------------------*/ if (nx < 4 || ny < 4 || nz < 4) { printf(" SUBDOMAIN SIZE IS TOO SMALL - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n TO 4 THEY ARE CURRENTLY%3d%3d%3d\n",nx,ny,nz); exit(1); } if (nx > 64 || ny > 64 || nz > 64) { printf(" SUBDOMAIN SIZE IS TOO LARGE - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. THEY ARE\n CURRENTLY%4d%4d%4d\n",nx,ny,nz); exit(1); } /*-------------------------------------------------------------------- c set up the start and end in i and j extents for all processors --------------------------------------------------------------------*/ ist = 1; iend = nx - 2; jst = 1; jend = ny - 2; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void erhs() { { /*-------------------------------------------------------------------- c c compute the right hand side based on exact solution c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double dsspm; double xi; double eta; double zeta; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; dsspm = dssp; for (i = 0; i <= nx - 1; i += 1) { for (j = 0; j <= ny - 1; j += 1) { for (k = 0; k <= nz - 1; k += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = 0.0; } } } } for (i = 0; i <= nx - 1; i += 1) { iglob = i; xi = ((double )iglob) / (nx0 - 1); for (j = 0; j <= ny - 1; j += 1) { jglob = j; eta = ((double )jglob) / (ny0 - 1); for (k = 0; k <= nz - 1; k += 1) { zeta = ((double )k) / (nz - 1); for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; for (i = L1; i <= L2; i += 1) { for (j = jst; j <= jend; j += 1) { for (k = 1; k <= nz - 1 - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][1]; u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][2] = rsd[i][j][k][2] * u21; flux[i][j][k][3] = 
rsd[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } for (j = jst; j <= jend; j += 1) { for (k = 1; k <= nz - 2; k += 1) { for (i = ist; i <= iend; i += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } for (i = ist; i <= L2; i += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21i = tmp * rsd[i][j][k][1]; u31i = tmp * rsd[i][j][k][2]; u41i = tmp * rsd[i][j][k][3]; u51i = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i - 1][j][k][0]; u21im1 = tmp * rsd[i - 1][j][k][1]; u31im1 = tmp * rsd[i - 1][j][k][2]; u41im1 = tmp * rsd[i - 1][j][k][3]; u51im1 = tmp * rsd[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } for (i = ist; i <= iend; i += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (- 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; for (i = ist1; i <= iend1; i += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } for (m = 0; m <= 4; m += 1) { frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; for (i = ist; i <= iend; i += 1) { for (j = L1; j <= L2; j += 1) { for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = rsd[i][j][k][2]; u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * 
rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u31; flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][3] = rsd[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } for (i = ist; i <= iend; i += 1) { for (k = 1; k <= nz - 2; k += 1) { for (j = jst; j <= jend; j += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } for (j = jst; j <= L2; j += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21j = tmp * rsd[i][j][k][1]; u31j = tmp * rsd[i][j][k][2]; u41j = tmp * rsd[i][j][k][3]; u51j = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j - 1][k][0]; u21jm1 = tmp * rsd[i][j - 1][k][1]; u31jm1 = tmp * rsd[i][j - 1][k][2]; u41jm1 = tmp * rsd[i][j - 1][k][3]; u51jm1 = tmp * rsd[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } for (j = jst; j <= jend; j += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (- 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } for (m = 0; m <= 4; m += 1) { frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ for (i = ist; i <= iend; i += 1) { for (j = jst; j <= jend; j += 1) { for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = 
rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } for (k = 1; k <= nz - 2; k += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j][k - 1][0]; u21km1 = tmp * rsd[i][j][k - 1][1]; u31km1 = tmp * rsd[i][j][k - 1][2]; u41km1 = tmp * rsd[i][j][k - 1][3]; u51km1 = tmp * rsd[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } for (k = 1; k <= nz - 2; k += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); frct[i][j][k][1] = frct[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } for (k = 3; k <= nz - 4; k += 1) { for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } for (m = 0; m <= 4; m += 1) { frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error() { /*-------------------------------------------------------------------- c c compute 
the solution error
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i;
int j;
int k;
int m;
int iglob;
int jglob;
double tmp;
double u000ijk[5];
for (m = 0; m <= 4; m += 1) {
  errnm[m] = 0.0;
}
for (i = ist; i <= iend; i += 1) {
  iglob = i;
  for (j = jst; j <= jend; j += 1) {
    jglob = j;
    for (k = 1; k <= nz - 2; k += 1) {
      exact(iglob,jglob,k,u000ijk);
      for (m = 0; m <= 4; m += 1) {
        tmp = u000ijk[m] - u[i][j][k][m];
        errnm[m] = errnm[m] + tmp * tmp;
      }
    }
  }
}
for (m = 0; m <= 4; m += 1) {
  errnm[m] = sqrt(errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)));
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact(int i,int j,int k,double u000ijk[5]) {
/*--------------------------------------------------------------------
c
c   compute the exact solution at (i,j,k)
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int m;
double xi;
double eta;
double zeta;
xi = ((double )i) / (nx0 - 1);
eta = ((double )j) / (ny0 - 1);
zeta = ((double )k) / (nz - 1);
for (m = 0; m <= 4; m += 1) {
  u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k) {
/*--------------------------------------------------------------------
c   compute the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i;
int j;
double r43;
double c1345;
double c34;
double tmp1;
double tmp2;
double tmp3;
r43 = 4.0 / 3.0;
c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
c34 = 1.00e-01 * 1.00e+00;
for (i = ist; i <= iend; i += 1) {
  for (j = jst; j <= jend; j += 1) {
/*--------------------------------------------------------------------
c   form the block diagonal
--------------------------------------------------------------------*/
    tmp1 = 1.0 / u[i][j][k][0];
    tmp2 = tmp1 * tmp1;
    tmp3 = tmp1 * tmp2;
    d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
    d[i][j][0][1] = 0.0;
    d[i][j][0][2] = 0.0;
    d[i][j][0][3] = 0.0;
    d[i][j][0][4] = 0.0;
    d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
    d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
    d[i][j][1][2] = 0.0;
    d[i][j][1][3] = 0.0;
    d[i][j][1][4] = 0.0;
    d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
    d[i][j][2][1] = 0.0;
    d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 +
ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k - 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tz1 * dz1; a[i][j][0][1] = 0.0; a[i][j][0][2] = 0.0; a[i][j][0][3] = -dt * tz2; a[i][j][0][4] = 0.0; a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]); a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; a[i][j][1][2] = 0.0; a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1); a[i][j][1][4] = 0.0; a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]); a[i][j][2][1] = 0.0; a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1); a[i][j][2][4] = 0.0; a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]); a[i][j][3][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * tmp1)); a[i][j][3][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * tmp1)); a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; a[i][j][3][4] = -dt * tz2 * 0.40e+00; a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] 
+ u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]); a[i][j][4][1] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1]; a[i][j][4][2] = -dt * tz2 * (- 0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2]; a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3]; a[i][j][4][4] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j - 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = -dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]); b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]); b[i][j][2][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * tmp1)); b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][3] * tmp1)); b[i][j][2][4] = -dt * ty2 * 0.40e+00; b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1); b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j - 1][k][1] * u[i][j - 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j - 1][k][2] * u[i][j - 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j - 1][k][3] * u[i][j - 1][k][3]) - c1345 * tmp2 * u[i][j - 1][k][4]); b[i][j][4][1] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1]; b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 
- c1345) * tmp2 * u[i][j - 1][k][2]; b[i][j][4][3] = -dt * ty2 * (- 0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3]; b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i - 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tx1 * dx1; c[i][j][0][1] = -dt * tx2; c[i][j][0][2] = 0.0; c[i][j][0][3] = 0.0; c[i][j][0][4] = 0.0; c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]); c[i][j][1][1] = -dt * tx2 * ((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; c[i][j][1][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * tmp1)); c[i][j][1][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * tmp1)); c[i][j][1][4] = -dt * tx2 * 0.40e+00; c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]); c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1); c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; c[i][j][2][3] = 0.0; c[i][j][2][4] = 0.0; c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]); c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1); c[i][j][3][2] = 0.0; c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; c[i][j][3][4] = 0.0; c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i - 1][j][k][1] * u[i - 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][2] * u[i - 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][3] * u[i - 1][j][k][3]) - c1345 * tmp2 * u[i - 1][j][k][4]); c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1]; c[i][j][4][2] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2]; c[i][j][4][3] = -dt * tx2 * (- 0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3]; c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacu(int k) { /*-------------------------------------------------------------------- c compute the upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables 
--------------------------------------------------------------------*/
int i;
int j;
double r43;
double c1345;
double c34;
double tmp1;
double tmp2;
double tmp3;
r43 = 4.0 / 3.0;
c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
c34 = 1.00e-01 * 1.00e+00;
for (i = iend; i >= ist; i += -1) {
  for (j = jend; j >= jst; j += -1) {
/*--------------------------------------------------------------------
c   form the block diagonal
--------------------------------------------------------------------*/
    tmp1 = 1.0 / u[i][j][k][0];
    tmp2 = tmp1 * tmp1;
    tmp3 = tmp1 * tmp2;
    d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
    d[i][j][0][1] = 0.0;
    d[i][j][0][2] = 0.0;
    d[i][j][0][3] = 0.0;
    d[i][j][0][4] = 0.0;
    d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
    d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
    d[i][j][1][2] = 0.0;
    d[i][j][1][3] = 0.0;
    d[i][j][1][4] = 0.0;
    d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
    d[i][j][2][1] = 0.0;
    d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
    d[i][j][2][3] = 0.0;
    d[i][j][2][4] = 0.0;
    d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
    d[i][j][3][1] = 0.0;
    d[i][j][3][2] = 0.0;
    d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
    d[i][j][3][4] = 0.0;
    d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]));
    d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]);
    d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]);
    d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]);
    d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5);
/*--------------------------------------------------------------------
c   form the first block sub-diagonal
--------------------------------------------------------------------*/
    tmp1 = 1.0 / u[i + 1][j][k][0];
    tmp2 = tmp1 * tmp1;
    tmp3 = tmp1 * tmp2;
    a[i][j][0][0] = -dt * tx1 * dx1;
    a[i][j][0][1] = dt * tx2;
    a[i][j][0][2] = 0.0;
    a[i][j][0][3] = 0.0;
    a[i][j][0][4] = 0.0;
    a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i +
1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]); a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * tmp1)); a[i][j][1][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * tmp1)); a[i][j][1][4] = dt * tx2 * 0.40e+00; a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]); a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] * tmp1); a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]); a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i + 1][j][k][1] * u[i + 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][2] * u[i + 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][3] * u[i + 1][j][k][3]) - c1345 * tmp2 * u[i + 1][j][k][4]); a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1]; a[i][j][4][2] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2]; a[i][j][4][3] = dt * tx2 * (- 0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3]; a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j + 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]); b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]); b[i][j][2][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * tmp1)); b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][3] * tmp1)); b[i][j][2][4] = dt * ty2 * 0.40e+00; 
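/*--------------------------------------------------------------------
c   note: the remaining rows of b (z-momentum and energy) follow; as
c   above, each entry combines the dt*ty2 convective-flux jacobian
c   with the dt*ty1 viscous terms, all evaluated at the neighbouring
c   point (i,j+1,k)
--------------------------------------------------------------------*/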
b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1); b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j + 1][k][1] * u[i][j + 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j + 1][k][2] * u[i][j + 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j + 1][k][3] * u[i][j + 1][k][3]) - c1345 * tmp2 * u[i][j + 1][k][4]); b[i][j][4][1] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1]; b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2]; b[i][j][4][3] = dt * ty2 * (- 0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3]; b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k + 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]); c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]); c[i][j][3][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * tmp1)); c[i][j][3][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * tmp1)); c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * 0.40e+00; c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k + 1][1] * u[i][j][k + 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k + 1][2] * u[i][j][k + 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k + 1][3] * u[i][j][k + 1][3]) - c1345 * 
tmp2 * u[i][j][k + 1][4]);
    c[i][j][4][1] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1];
    c[i][j][4][2] = dt * tz2 * (- 0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2];
    c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3];
    c[i][j][4][4] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
  }
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm(int nx0,int ny0,int nz0,int ist,int iend,int jst,int jend,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only.  Only needed in v.
--------------------------------------------------------------------*/
double v[64][65][65][5],double sum[5]) {
{
/*--------------------------------------------------------------------
c   to compute the l2-norm of vector v.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i;
int j;
int k;
int m;
double sum0 = 0.0;
double sum1 = 0.0;
double sum2 = 0.0;
double sum3 = 0.0;
double sum4 = 0.0;
for (m = 0; m <= 4; m += 1) {
  sum[m] = 0.0;
}
for (i = ist; i <= iend; i += 1) {
  for (j = jst; j <= jend; j += 1) {
    for (k = 1; k <= nz0 - 2; k += 1) {
      sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
      sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
      sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
      sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
      sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
    }
  }
}
{
  sum[0] += sum0;
  sum[1] += sum1;
  sum[2] += sum2;
  sum[3] += sum3;
  sum[4] += sum4;
}
for (m = 0; m <= 4; m += 1) {
  sum[m] = sqrt(sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)));
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pintgr() {
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
int i;
int j;
int k;
int ibeg;
int ifin;
int ifin1;
int jbeg;
int jfin;
int jfin1;
int iglob;
int iglob1;
int iglob2;
int jglob;
int jglob1;
int jglob2;
/* phi1(0:isiz2+1,0:isiz3+1) */
double phi1[66][66];
/* phi2(0:isiz2+1,0:isiz3+1) */
double phi2[66][66];
double frc1;
double frc2;
double frc3;
/*--------------------------------------------------------------------
c   set up the sub-domains for integration in each processor
--------------------------------------------------------------------*/
ibeg = nx;
ifin = 0;
iglob1 = - 1;
iglob2 = nx - 1;
if (iglob1 >= ii1 && iglob2 < ii2 + nx) ibeg = 0;
if (iglob1 >= ii1 - nx && iglob2 <= ii2) ifin = nx;
if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
jbeg = ny;
jfin = - 1;
jglob1 = 0;
jglob2 = ny - 1;
if (jglob1 >= ji1 && jglob2 < ji2 + ny) jbeg = 0;
if (jglob1 > ji1 - ny && jglob2 <= ji2) jfin = ny;
if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
if (ji2 >= jglob1
&& ji2 <= jglob2) jfin = ji2; ifin1 = ifin; jfin1 = jfin; if (ifin1 == ii2) ifin1 = ifin - 1; if (jfin1 == ji2) jfin1 = jfin - 1; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ for (i = 0; i <= 65; i += 1) { for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } for (i = ibeg; i <= ifin; i += 1) { iglob = i; for (j = jbeg; j <= jfin; j += 1) { jglob = j; k = ki1; phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]); k = ki2; phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]); } } frc1 = 0.0; for (i = ibeg; i <= ifin1; i += 1) { for (j = jbeg; j <= jfin1; j += 1) { frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]); } } frc1 = dxi * deta * frc1; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ for (i = 0; i <= 65; i += 1) { for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } jglob = jbeg; if (jglob == ji1) { for (i = ibeg; i <= ifin; i += 1) { iglob = i; for (k = ki1; k <= ki2; k += 1) { phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (u[i][jbeg][k][1] * u[i][jbeg][k][1] + u[i][jbeg][k][2] * u[i][jbeg][k][2] + u[i][jbeg][k][3] * u[i][jbeg][k][3]) / u[i][jbeg][k][0]); } } } jglob = jfin; if (jglob == ji2) { for (i = ibeg; i <= ifin; i += 1) { iglob = i; for (k = ki1; k <= ki2; k += 1) { phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (u[i][jfin][k][1] * u[i][jfin][k][1] + u[i][jfin][k][2] * u[i][jfin][k][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]); } } } frc2 = 0.0; for (i = ibeg; i <= ifin1; i += 1) { for (k = ki1; k <= ki2 - 1; k += 1) { frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]); } } frc2 = dxi * dzeta * frc2; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ for (i = 0; i <= 65; i += 1) { for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } iglob = ibeg; if (iglob == ii1) { for (j = jbeg; j <= jfin; j += 1) { jglob = j; for (k = ki1; k <= ki2; k += 1) { phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (u[ibeg][j][k][1] * u[ibeg][j][k][1] + u[ibeg][j][k][2] * u[ibeg][j][k][2] + u[ibeg][j][k][3] * u[ibeg][j][k][3]) / u[ibeg][j][k][0]); } } } iglob = ifin; if (iglob == ii2) { for (j = jbeg; j <= jfin; j += 1) { jglob = j; for (k = ki1; k <= ki2; k += 1) { phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (u[ifin][j][k][1] * u[ifin][j][k][1] + u[ifin][j][k][2] * u[ifin][j][k][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]); } } } frc3 = 0.0; for (j = jbeg; j <= jfin1; j += 1) { for (k = ki1; k <= ki2 - 1; k += 1) { frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]); } } frc3 = deta * dzeta * frc3; frc = 0.25 * (frc1 + frc2 + frc3); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ 
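/*--------------------------------------------------------------------
c   illustrative sketch, not part of the benchmark: each surface
c   integral in pintgr above is a 2-d trapezoidal rule -- every cell
c   adds the sum of its four corner values, the total is scaled by
c   the mesh spacings, and the final frc = 0.25*(frc1+frc2+frc3)
c   supplies the 1/4 corner weight.  the helper name trap2d and the
c   fixed 66x66 extent (matching the phi1/phi2 scratch arrays) are
c   hypothetical.
--------------------------------------------------------------------*/
static double trap2d(int ibeg,int ifin1,int jbeg,int jfin1,double h1,double h2,double phi[66][66]) {
  int i;
  int j;
  double s;
  s = 0.0;
  for (i = ibeg; i <= ifin1; i += 1) {
    for (j = jbeg; j <= jfin1; j += 1) {
      /* sum the four corners of cell (i,j); the caller applies the 0.25 weight */
      s = s + (phi[i][j] + phi[i + 1][j] + phi[i][j + 1] + phi[i + 1][j + 1]);
    }
  }
  return h1 * h2 * s;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/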
static void read_input() { FILE *fp; /*-------------------------------------------------------------------- c if input file does not exist, it uses defaults c ipr = 1 for detailed progress output c inorm = how often the norm is printed (once every inorm iterations) c itmax = number of pseudo time steps c dt = time step c omega 1 over-relaxation factor for SSOR c tolrsd = steady state residual tolerance levels c nx, ny, nz = number of grid points in x, y, z directions --------------------------------------------------------------------*/ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - LU Benchmark\n\n"); fp = fopen("inputlu.data","r"); if (fp != ((void *)0)) { printf(" Reading from input file inputlu.data\n"); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%d%d",&ipr,&inorm); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%d",&itmax); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%lf",&dt); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%lf",&omega); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%lf%lf%lf%lf%lf",&tolrsd[0],&tolrsd[1],&tolrsd[2],&tolrsd[3],&tolrsd[4]); while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; while(fgetc(fp) != '\n') ; fscanf(fp,"%d%d%d",&nx0,&ny0,&nz0); while(fgetc(fp) != '\n') ; fclose(fp); } else { ipr = 1; inorm = 250; itmax = 250; dt = 2.0; omega = 1.2; tolrsd[0] = 1.0e-8; tolrsd[1] = 1.0e-8; tolrsd[2] = 1.0e-8; tolrsd[3] = 1.0e-8; tolrsd[4] = 1.0e-8; nx0 = 64; ny0 = 64; nz0 = 64; } /*-------------------------------------------------------------------- c check problem size --------------------------------------------------------------------*/ if (nx0 < 4 || ny0 < 4 || nz0 < 4) { printf(" PROBLEM SIZE IS TOO SMALL - \n SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n"); exit(1); } if (nx0 > 64 || ny0 > 64 || nz0 > 64) { printf(" PROBLEM SIZE IS TOO LARGE - \n NX, NY AND NZ SHOULD BE EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n"); exit(1); } printf(" Size: %3dx%3dx%3d\n",nx0,ny0,nz0); printf(" Iterations: %3d\n",itmax); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void rhs() { { /*-------------------------------------------------------------------- c compute the right hand sides --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; for (i = 0; i <= nx - 1; i += 1) { for (j = 0; j <= ny - 1; j += 1) { for (k = 0; k <= nz - 1; k += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; 
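/*--------------------------------------------------------------------
c   L1 and L2 bound the flux sweep below: the xi-direction fluxes are
c   formed on the full range 0..nx-1, including both boundary points
--------------------------------------------------------------------*/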
L2 = nx - 1; for (i = L1; i <= L2; i += 1) { for (j = jst; j <= jend; j += 1) { for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][1]; u21 = u[i][j][k][1] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][2] = u[i][j][k][2] * u21; flux[i][j][k][3] = u[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } for (j = jst; j <= jend; j += 1) { for (k = 1; k <= nz - 2; k += 1) { for (i = ist; i <= iend; i += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } L2 = nx - 1; for (i = ist; i <= L2; i += 1) { tmp = 1.0 / u[i][j][k][0]; u21i = tmp * u[i][j][k][1]; u31i = tmp * u[i][j][k][2]; u41i = tmp * u[i][j][k][3]; u51i = tmp * u[i][j][k][4]; tmp = 1.0 / u[i - 1][j][k][0]; u21im1 = tmp * u[i - 1][j][k][1]; u31im1 = tmp * u[i - 1][j][k][2]; u41im1 = tmp * u[i - 1][j][k][3]; u51im1 = tmp * u[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } for (i = ist; i <= iend; i += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (- 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; for (i = ist1; i <= iend1; i += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } for (m = 0; m <= 4; m += 1) { rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences 
--------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; for (i = ist; i <= iend; i += 1) { for (j = L1; j <= L2; j += 1) { for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][2]; u31 = u[i][j][k][2] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u31; flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][3] = u[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } for (i = ist; i <= iend; i += 1) { for (k = 1; k <= nz - 2; k += 1) { for (j = jst; j <= jend; j += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } L2 = ny - 1; for (j = jst; j <= L2; j += 1) { tmp = 1.0 / u[i][j][k][0]; u21j = tmp * u[i][j][k][1]; u31j = tmp * u[i][j][k][2]; u41j = tmp * u[i][j][k][3]; u51j = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j - 1][k][0]; u21jm1 = tmp * u[i][j - 1][k][1]; u31jm1 = tmp * u[i][j - 1][k][2]; u41jm1 = tmp * u[i][j - 1][k][3]; u51jm1 = tmp * u[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } for (j = jst; j <= jend; j += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (- 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } for (m = 0; m <= 4; m += 1) { rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } 
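/*--------------------------------------------------------------------
c   editorial note (not part of the original benchmark): the
c   dissipation updates above apply the classical fourth-difference
c   stencil dssp*(1,-4,6,-4,1) at interior points; at the two points
c   next to each boundary the stencil is truncated to (5,-4,1) and
c   (-4,6,-4,1) (mirrored on the far side) so that no point outside
c   the grid is referenced.  the zeta-direction sweep below repeats
c   the same pattern in k.
--------------------------------------------------------------------*/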
/*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ for (i = ist; i <= iend; i += 1) { for (j = jst; j <= jend; j += 1) { for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = u[i][j][k][3]; u41 = u[i][j][k][3] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u41; flux[i][j][k][2] = u[i][j][k][2] * u41; flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41; } for (k = 1; k <= nz - 2; k += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / u[i][j][k][0]; u21k = tmp * u[i][j][k][1]; u31k = tmp * u[i][j][k][2]; u41k = tmp * u[i][j][k][3]; u51k = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j][k - 1][0]; u21km1 = tmp * u[i][j][k - 1][1]; u31km1 = tmp * u[i][j][k - 1][2]; u41km1 = tmp * u[i][j][k - 1][3]; u51km1 = tmp * u[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } for (k = 1; k <= nz - 2; k += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (- 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } for (k = 3; k <= nz - 4; k += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]); } } for (m = 0; m <= 4; m += 1) { rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- 
--------------------------------------------------------------------*/ static void setbv() { { /*-------------------------------------------------------------------- c set the boundary values of dependent variables --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int iglob; int jglob; /*-------------------------------------------------------------------- c set the dependent variable values along the top and bottom faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (j = 0; j <= ny - 1; j += 1) { jglob = j; exact(iglob,jglob,0,&u[i][j][0][0]); exact(iglob,jglob,nz - 1,&u[i][j][nz - 1][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along north and south faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob,0,k,&u[i][0][k][0]); } } for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob,ny0 - 1,k,&u[i][ny - 1][k][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along east and west faces --------------------------------------------------------------------*/ for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(0,jglob,k,&u[0][j][k][0]); } } for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(nx0 - 1,jglob,k,&u[nx - 1][j][k][0]); } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setcoeff() { /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ dxi = 1.0 / (nx0 - 1); deta = 1.0 / (ny0 - 1); dzeta = 1.0 / (nz0 - 1); tx1 = 1.0 / (dxi * dxi); tx2 = 1.0 / (2.0 * dxi); tx3 = 1.0 / dxi; ty1 = 1.0 / (deta * deta); ty2 = 1.0 / (2.0 * deta); ty3 = 1.0 / deta; tz1 = 1.0 / (dzeta * dzeta); tz2 = 1.0 / (2.0 * dzeta); tz3 = 1.0 / dzeta; ii1 = 1; ii2 = nx0 - 2; ji1 = 1; ji2 = ny0 - 3; ki1 = 2; ki2 = nz0 - 2; /*-------------------------------------------------------------------- c diffusion coefficients --------------------------------------------------------------------*/ dx1 = 0.75; dx2 = dx1; dx3 = dx1; dx4 = dx1; dx5 = dx1; dy1 = 0.75; dy2 = dy1; dy3 = dy1; dy4 = dy1; dy5 = dy1; dz1 = 1.00; dz2 = dz1; dz3 = dz1; dz4 = dz1; dz5 = dz1; /*-------------------------------------------------------------------- c fourth difference dissipation --------------------------------------------------------------------*/ dssp = ((dx1 > ((dy1 > dz1?dy1 : dz1))?dx1 : ((dy1 > dz1?dy1 : dz1)))) / 4.0; /*-------------------------------------------------------------------- c coefficients of the exact solution to the first pde --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 5.0e-01; ce[0][7] = 2.0e-02; ce[0][8] = 1.0e-02; ce[0][9] = 3.0e-02; ce[0][10] = 5.0e-01; ce[0][11] = 4.0e-01; ce[0][12] = 3.0e-01; 
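/*--------------------------------------------------------------------
c   editorial note (not part of the original benchmark): each row
c   ce[m][0..12] stores the 13 coefficients of the quartic
c   exact-solution polynomial evaluated by exact(), i.e.
c   u_m = ce[m][0] + ce[m][1]*xi   + ce[m][2]*eta   + ce[m][3]*zeta
c       + ce[m][4]*xi^2 + ce[m][5]*eta^2 + ce[m][6]*zeta^2
c       + ce[m][7]*xi^3 + ce[m][8]*eta^3 + ce[m][9]*zeta^3
c       + ce[m][10]*xi^4 + ce[m][11]*eta^4 + ce[m][12]*zeta^4.
--------------------------------------------------------------------*/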
/*-------------------------------------------------------------------- c coefficients of the exact solution to the second pde --------------------------------------------------------------------*/ ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 1.0e-02; ce[1][8] = 3.0e-02; ce[1][9] = 2.0e-02; ce[1][10] = 4.0e-01; ce[1][11] = 3.0e-01; ce[1][12] = 5.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the third pde --------------------------------------------------------------------*/ ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 4.0e-02; ce[2][8] = 3.0e-02; ce[2][9] = 5.0e-02; ce[2][10] = 3.0e-01; ce[2][11] = 5.0e-01; ce[2][12] = 4.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fourth pde --------------------------------------------------------------------*/ ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 3.0e-02; ce[3][8] = 5.0e-02; ce[3][9] = 4.0e-02; ce[3][10] = 2.0e-01; ce[3][11] = 1.0e-01; ce[3][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fifth pde --------------------------------------------------------------------*/ ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 1.0e-01; ce[4][5] = 4.0e-01; ce[4][6] = 3.0e-01; ce[4][7] = 5.0e-02; ce[4][8] = 4.0e-02; ce[4][9] = 3.0e-02; ce[4][10] = 1.0e-01; ce[4][11] = 3.0e-01; ce[4][12] = 2.0e-01; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setiv() { { /*-------------------------------------------------------------------- c c set the initial values of independent variables based on tri-linear c interpolation of boundary values in the computational space. 
c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; double xi; double eta; double zeta; double pxi; double peta; double pzeta; double ue_1jk[5]; double ue_nx0jk[5]; double ue_i1k[5]; double ue_iny0k[5]; double ue_ij1[5]; double ue_ijnz[5]; for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 1; k <= nz - 1 - 1; k += 1) { zeta = ((double )k) / (nz - 1); if (jglob != 0 && jglob != ny0 - 1) { eta = ((double )jglob) / (ny0 - 1); for (i = 0; i <= nx - 1; i += 1) { iglob = i; if (iglob != 0 && iglob != nx0 - 1) { xi = ((double )iglob) / (nx0 - 1); exact(0,jglob,k,ue_1jk); exact(nx0 - 1,jglob,k,ue_nx0jk); exact(iglob,0,k,ue_i1k); exact(iglob,ny0 - 1,k,ue_iny0k); exact(iglob,jglob,0,ue_ij1); exact(iglob,jglob,nz - 1,ue_ijnz); for (m = 0; m <= 4; m += 1) { pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void ssor() { /*-------------------------------------------------------------------- c to perform pseudo-time stepping SSOR iterations c for five nonlinear pde s. --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int istep; double tmp; double delunm[5]; double tv[64][64][5]; /*-------------------------------------------------------------------- c begin pseudo-time stepping iterations --------------------------------------------------------------------*/ tmp = 1.0 / (omega * (2.0 - omega)); /*-------------------------------------------------------------------- c initialize a,b,c,d to zero (guarantees that page tables have been c formed, if applicable on given architecture, before timestepping). 
--------------------------------------------------------------------*/ { for (i = 0; i <= 63; i += 1) { for (j = 0; j <= 63; j += 1) { for (k = 0; k <= 4; k += 1) { for (m = 0; m <= 4; m += 1) { a[i][j][k][m] = 0.0; b[i][j][k][m] = 0.0; c[i][j][k][m] = 0.0; d[i][j][k][m] = 0.0; } } } } } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration residuals --------------------------------------------------------------------*/ l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,rsdnm); timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c the timestep loop --------------------------------------------------------------------*/ for (istep = 1; istep <= itmax; istep += 1) { if (istep % 20 == 0 || istep == itmax || istep == 1) { printf(" Time step %4d\n",istep); } { /*-------------------------------------------------------------------- c perform SSOR iteration --------------------------------------------------------------------*/ for (i = ist; i <= iend; i += 1) { for (j = jst; j <= jend; j += 1) { for (k = 1; k <= nz - 2; k += 1) { for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } for (k = 1; k <= nz - 2; k += 1) { /*-------------------------------------------------------------------- c form the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacld(k); /*-------------------------------------------------------------------- c perform the lower triangular solution --------------------------------------------------------------------*/ blts(nx,ny,nz,k,omega,rsd,a,b,c,d,ist,iend,jst,jend,nx0,ny0); } for (k = nz - 2; k >= 1; k += -1) { /*-------------------------------------------------------------------- c form the strictly upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacu(k); /*-------------------------------------------------------------------- c perform the upper triangular solution --------------------------------------------------------------------*/ buts(nx,ny,nz,k,omega,rsd,tv,d,a,b,c,ist,iend,jst,jend,nx0,ny0); } /*-------------------------------------------------------------------- c update the variables --------------------------------------------------------------------*/ for (i = ist; i <= iend; i += 1) { for (j = jst; j <= jend; j += 1) { for (k = 1; k <= nz - 2; k += 1) { for (m = 0; m <= 4; m += 1) { u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* end parallel */ } /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration corrections --------------------------------------------------------------------*/ if (istep % inorm == 0) { l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,delunm); } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration residuals --------------------------------------------------------------------*/ if (istep % inorm == 0 || istep == itmax) { l2norm(nx0,ny0,nz0,ist,iend,jst,jend,rsd,rsdnm); }
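/*--------------------------------------------------------------------
c   editorial note (not part of the original benchmark): the
c   correction applied above is u += tmp*rsd with
c   tmp = 1.0/(omega*(2.0-omega)); for the default omega = 1.2 this
c   is 1.0/0.96, about 1.0417.  it rescales the result of the
c   omega-weighted lower (blts) and upper (buts) triangular sweeps.
--------------------------------------------------------------------*/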
/*-------------------------------------------------------------------- c check the newton-iteration residuals against the tolerance levels --------------------------------------------------------------------*/ if (rsdnm[0] < tolrsd[0] && rsdnm[1] < tolrsd[1] && rsdnm[2] < tolrsd[2] && rsdnm[3] < tolrsd[3] && rsdnm[4] < tolrsd[4]) { exit(1); } } timer_stop(1); maxtime = timer_read(1); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void verify(double xcr[5],double xce[5],double xci,char *class,boolean *verified) { /*-------------------------------------------------------------------- c verification routine --------------------------------------------------------------------*/ double xcrref[5]; double xceref[5]; double xciref; double xcrdif[5]; double xcedif[5]; double xcidif; double epsilon; double dtref; int m; /*-------------------------------------------------------------------- c tolerance level --------------------------------------------------------------------*/ epsilon = 1.0e-08; *class = 'U'; *verified = 1; for (m = 0; m <= 4; m += 1) { xcrref[m] = 1.0; xceref[m] = 1.0; } xciref = 1.0; if (nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) { *class = 'S'; dtref = 5.0e-1; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xcrref[0] = 1.6196343210976702e-02; xcrref[1] = 2.1976745164821318e-03; xcrref[2] = 1.5179927653399185e-03; xcrref[3] = 1.5029584435994323e-03; xcrref[4] = 3.4264073155896461e-02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xceref[0] = 6.4223319957960924e-04; xceref[1] = 8.4144342047347926e-05; xceref[2] = 5.8588269616485186e-05; xceref[3] = 5.8474222595157350e-05; xceref[4] = 1.3103347914111294e-03; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xciref = 7.8418928865937083; } else if (nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) { /* SPEC95fp size */ *class = 'W'; dtref = 1.5e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (33x33x33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xcrref[0] = 0.1236511638192e+02; xcrref[1] = 0.1317228477799e+01; xcrref[2] = 0.2550120713095e+01; xcrref[3] = 0.2326187750252e+01; xcrref[4] = 0.2826799444189e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (33X33X33) grid, --------------------------------------------------------------------*/ xceref[0] = 0.4867877144216; xceref[1] = 0.5064652880982e-01; xceref[2] = 0.9281818101960e-01; xceref[3] = 0.8570126542733e-01; xceref[4] = 0.1084277417792e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (33X33X33) grid, c after 300 time steps, with DT = 1.5d-3 
--------------------------------------------------------------------*/ xciref = 0.1161399311023e+02; } else if (nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) { *class = 'A'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xcrref[0] = 7.7902107606689367e+02; xcrref[1] = 6.3402765259692870e+01; xcrref[2] = 1.9499249727292479e+02; xcrref[3] = 1.7845301160418537e+02; xcrref[4] = 1.8384760349464247e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xceref[0] = 2.9964085685471943e+01; xceref[1] = 2.8194576365003349; xceref[2] = 7.3473412698774742; xceref[3] = 6.7139225687777051; xceref[4] = 7.0715315688392578e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xciref = 2.6030925604886277e+01; } else if (nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) { *class = 'B'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xcrref[0] = 3.5532672969982736e+03; xcrref[1] = 2.6214750795310692e+02; xcrref[2] = 8.8333721850952190e+02; xcrref[3] = 7.7812774739425265e+02; xcrref[4] = 7.3087969592545314e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (102X102X102) c grid, after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xceref[0] = 1.1401176380212709e+02; xceref[1] = 8.1098963655421574; xceref[2] = 2.8480597317698308e+01; xceref[3] = 2.5905394567832939e+01; xceref[4] = 2.6054907504857413e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xciref = 4.7887162703308227e+01; } else if (nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) { *class = 'C'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xcrref[0] = 1.03766980323537846e+04; xcrref[1] = 8.92212458801008552e+02; xcrref[2] = 2.56238814582660871e+03; xcrref[3] = 2.19194343857831427e+03; xcrref[4] = 1.78078057261061185e+04; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (162X162X162) c grid, after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xceref[0] = 2.15986399716949279e+02; xceref[1] = 1.55789559239863600e+01; xceref[2] = 5.41318863077207766e+01; xceref[3] =
4.82262643154045421e+01; xceref[4] = 4.55902910043250358e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+00 --------------------------------------------------------------------*/ xciref = 6.66404553572181300e+01; } else { *verified = 0; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12, 33X33X33, c 64X64X64, 102X102X102 or 162X162X162 --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]); xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]); } xcidif = fabs((xci - xciref) / xciref); /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. --------------------------------------------------------------------*/ if (( *class) != 'U') { printf("\n Verification being performed for class %1c\n",( *class)); printf(" Accuracy setting for epsilon = %20.13e\n",epsilon); if (fabs(dt - dtref) > epsilon) { *verified = 0; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n",dtref); } } else { printf(" Unknown class\n"); } if (( *class) != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m <= 4; m += 1) { if (( *class) == 'U') { printf(" %2d %20.13e\n",m,xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = 0; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",m,xcr[m],xcrref[m],xcrdif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n",m,xcr[m],xcrref[m],xcrdif[m]); } } if (( *class) != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m <= 4; m += 1) { if (( *class) == 'U') { printf(" %2d %20.13e\n",m,xce[m]); } else if (xcedif[m] > epsilon) { *verified = 0; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n",m,xce[m],xceref[m],xcedif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n",m,xce[m],xceref[m],xcedif[m]); } } if (( *class) != 'U') { printf(" Comparison of surface integral\n"); } else { printf(" Surface integral\n"); } if (( *class) == 'U') { printf(" %20.13e\n",xci); } else if (xcidif > epsilon) { *verified = 0; printf(" FAILURE: %20.13e%20.13e%20.13e\n",xci,xciref,xcidif); } else { printf(" %20.13e%20.13e%20.13e\n",xci,xciref,xcidif); } if (( *class) == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if ( *verified) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } }
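/*--------------------------------------------------------------------
c   editorial sketch (not part of the benchmark): the verification
c   logic above reduces to a relative-difference test of each computed
c   norm against its reference value with epsilon = 1.0e-08.  a
c   minimal standalone version, with a hypothetical helper name,
c   would look like this:
--------------------------------------------------------------------*/
#include <math.h> /* for fabs(); harmless if already pulled in via npb-C.h */
static int within_tolerance(const double *got, const double *ref,
                            int n, double epsilon) {
    int m;
    for (m = 0; m < n; m++) {
        /* mirrors xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]) */
        if (fabs((got[m] - ref[m]) / ref[m]) > epsilon) {
            return 0; /* corresponds to the FAILURE branch in verify() */
        }
    }
    return 1; /* all components within the relative tolerance */
}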
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU This benchmark is an OpenMP C version of the NPB LU code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: S. Weeratunga V. Venkatakrishnan E. Barszcz M. Yarrow OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "../common/npb-C.h" /* global variables */ #include "applu.h" #if defined(_OPENMP) /* for thread synchronization */ #endif /* _OPENMP */ /* function declarations */ #include <omp.h> static void blts(int nx, int ny, int nz, int k, double omega, double v[64][65][65][5], double ldz[64][64][5][5], double ldy[64][64][5][5], double ldx[64][64][5][5], double d[64][64][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0); static void buts(int nx, int ny, int nz, int k, double omega, double v[64][65][65][5], double tv[64][64][5], double d[64][64][5][5], double udx[64][64][5][5], double udy[64][64][5][5], double udz[64][64][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0); static void domain(); static void erhs(); static void error(); static void exact(int i, int j, int k, double u000ijk[5]); static void jacld(int k); static void jacu(int k); static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, double v[64][65][65][5], double sum[5]); static void pintgr(); static void read_input(); static void rhs(); static void setbv(); static void setcoeff(); static void setiv(); static void ssor(); static void verify(double xcr[5], double xce[5], double xci, char *class, boolean * verified); /*-------------------------------------------------------------------- program applu --------------------------------------------------------------------*/ int main(int argc, char **argv) { /*-------------------------------------------------------------------- c c driver for the performance evaluation of the solver for c five coupled parabolic/elliptic partial differential equations. 
c --------------------------------------------------------------------*/ char class; boolean verified; double mflops; int nthreads = 1; /*-------------------------------------------------------------------- c read input data --------------------------------------------------------------------*/ read_input(); /*-------------------------------------------------------------------- c set up domain sizes --------------------------------------------------------------------*/ domain(); /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ setcoeff(); /*-------------------------------------------------------------------- c set the boundary values for dependent variables --------------------------------------------------------------------*/ setbv(); /*-------------------------------------------------------------------- c set the initial values for dependent variables --------------------------------------------------------------------*/ setiv(); /*-------------------------------------------------------------------- c compute the forcing term based on prescribed exact solution --------------------------------------------------------------------*/ erhs(); { #if defined(_OPENMP) #endif /* _OPENMP */ } /*-------------------------------------------------------------------- c perform the SSOR iterations --------------------------------------------------------------------*/ ssor(); /*-------------------------------------------------------------------- c compute the solution error --------------------------------------------------------------------*/ error(); /*-------------------------------------------------------------------- c compute the surface integral --------------------------------------------------------------------*/ pintgr(); /*-------------------------------------------------------------------- c verification test --------------------------------------------------------------------*/ verify(rsdnm, errnm, frc, &class, &verified); mflops = ((double)itmax) * (1984.77 * ((double)nx0) * ((double)ny0) * ((double)nz0) - 10923.3 * (((double)(nx0 + ny0 + nz0)) / 3.0 * (((double)(nx0 + ny0 + nz0)) / 3.0)) + 27770.9 * ((double)(nx0 + ny0 + nz0)) / 3.0 - 144010.0) / (maxtime * 1000000.0); c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads, maxtime, mflops, " floating point", verified, "3.0 structured", "01 Dec 2019", "(none)", "(none)", "-lm", "(none)", "(none)", "(none)", "(none)"); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void blts(int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[64][65][65][5], double ldz[64][64][5][5], double ldy[64][64][5][5], double ldx[64][64][5][5], double d[64][64][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; #pragma omp parallel for private (i,j,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (k,omega) for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldz[i][j][m][0] * v[i][j][k - 1][0] + ldz[i][j][m][1] * v[i][j][k - 1][1] + ldz[i][j][m][2] * v[i][j][k - 1][2] + ldz[i][j][m][3] * v[i][j][k - 1][3] + ldz[i][j][m][4] * v[i][j][k - 1][4]); } } } for (i = ist; i <= iend; i += 1) { #if defined(_OPENMP) #endif /* _OPENMP */ for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (omega) for (m = 0; m <= 4; m += 1) { v[i][j][k][m] = v[i][j][k][m] - omega * (ldy[i][j][m][0] * v[i][j - 1][k][0] + ldx[i][j][m][0] * v[i - 1][j][k][0] + ldy[i][j][m][1] * v[i][j - 1][k][1] + ldx[i][j][m][1] * v[i - 1][j][k][1] + ldy[i][j][m][2] * v[i][j - 1][k][2] + ldx[i][j][m][2] * v[i - 1][j][k][2] + ldy[i][j][m][3] * v[i][j - 1][k][3] + ldx[i][j][m][3] * v[i - 1][j][k][3] + ldy[i][j][m][4] * v[i][j - 1][k][4] + ldx[i][j][m][4] * v[i - 1][j][k][4]); } /*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * 
tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } #if defined(_OPENMP) #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[64][65][65][5], double tv[64][64][5], double d[64][64][5][5], double udx[64][64][5][5], double udy[64][64][5][5], double udz[64][64][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int m; double tmp; double tmp1; double tmat[5][5]; #pragma omp parallel for private (i,j,m) for (i = iend; i >= ist; i += -1) { #pragma omp parallel for private (j,m) for (j = jend; j >= jst; j += -1) { #pragma omp parallel for private (m) firstprivate (k,omega) for (m = 0; m <= 4; m += 1) { tv[i][j][m] = omega * (udz[i][j][m][0] * v[i][j][k + 1][0] + udz[i][j][m][1] * v[i][j][k + 1][1] + udz[i][j][m][2] * v[i][j][k + 1][2] + udz[i][j][m][3] * v[i][j][k + 1][3] + udz[i][j][m][4] * v[i][j][k + 1][4]); } } } for (i = iend; i >= ist; i += -1) { #if defined(_OPENMP) #endif /* _OPENMP */ for (j = jend; j >= jst; j += -1) { #pragma omp parallel for private (m) firstprivate (omega) for (m = 0; m <= 4; m += 1) { tv[i][j][m] = tv[i][j][m] + omega * (udy[i][j][m][0] * v[i][j + 1][k][0] + udx[i][j][m][0] * v[i + 1][j][k][0] + udy[i][j][m][1] * v[i][j + 1][k][1] + udx[i][j][m][1] * v[i + 1][j][k][1] + udy[i][j][m][2] * v[i][j + 1][k][2] + udx[i][j][m][2] * v[i + 1][j][k][2] + udy[i][j][m][3] * v[i][j + 1][k][3] + udx[i][j][m][3] * v[i + 1][j][k][3] + udy[i][j][m][4] * v[i][j + 1][k][4] + udx[i][j][m][4] * v[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; 
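/*--------------------------------------------------------------------
c   editorial note (not part of the original benchmark): the tmp/tmat
c   statements in this region perform gaussian elimination without
c   pivoting on the 5x5 diagonal block copied from d[i][j]: for each
c   pivot column r, the multiplier tmp = tmat[s][r]/tmat[r][r]
c   eliminates row s, and the same multiplier updates the right-hand
c   side tv[i][j][s]; the back substitution afterwards solves the
c   resulting upper triangular system.
--------------------------------------------------------------------*/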
tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ tv[i][j][4] = tv[i][j][4] / tmat[4][4]; tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4]; tv[i][j][3] = tv[i][j][3] / tmat[3][3]; tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4]; tv[i][j][2] = tv[i][j][2] / tmat[2][2]; tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4]; tv[i][j][1] = tv[i][j][1] / tmat[1][1]; tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4]; tv[i][j][0] = tv[i][j][0] / tmat[0][0]; v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0]; v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1]; v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2]; v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3]; v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4]; } #if defined(_OPENMP) #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void domain() { /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ nx = nx0; ny = ny0; nz = nz0; /*-------------------------------------------------------------------- c check the sub-domain size --------------------------------------------------------------------*/ if (nx < 4 || ny < 4 || nz < 4) { printf(" SUBDOMAIN SIZE IS TOO SMALL - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz); exit(1); } if (nx > 64 || ny > 64 || nz > 64) { printf(" SUBDOMAIN SIZE IS TOO LARGE - \n ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. 
THEY ARE\n CURRENTLY%4d%4d%4d\n", nx, ny, nz); exit(1); } /*-------------------------------------------------------------------- c set up the start and end in i and j extents for all processors --------------------------------------------------------------------*/ ist = 1; iend = nx - 2; jst = 1; jend = ny - 2; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void erhs() { { /*-------------------------------------------------------------------- c c compute the right hand side based on exact solution c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double dsspm; double xi; double eta; double zeta; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; dsspm = dssp; #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= nx - 1; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= ny - 1; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= nz - 1; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = 0.0; } } } } #pragma omp parallel for private (iglob,jglob,xi,eta,zeta,i,j,k,m) firstprivate (nx0) for (i = 0; i <= nx - 1; i += 1) { iglob = i; xi = ((double)iglob) / (nx0 - 1); #pragma omp parallel for private (jglob,eta,zeta,j,k,m) firstprivate (ny0) for (j = 0; j <= ny - 1; j += 1) { jglob = j; eta = ((double)jglob) / (ny0 - 1); #pragma omp parallel for private (zeta,k,m) for (k = 0; k <= nz - 1; k += 1) { zeta = ((double)k) / (nz - 1); #pragma omp parallel for private (m) firstprivate (xi,eta,zeta) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2) for (i = L1; i <= L2; i += 1) { #pragma omp parallel for private (q,u21,j,k) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u21,k) for (k = 1; k <= nz - 1 - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][1]; u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u21 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][2] = rsd[i][j][k][2] * u21; flux[i][j][k][3] = rsd[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u21; } } } #pragma omp parallel for private 
(u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,ist1,iend1,tmp,u21i,u31i,u41i,u51i,i,k,m) firstprivate (nx,L2) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (i,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (m) firstprivate (tx2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } #pragma omp parallel for private (u21im1,u31im1,u41im1,u51im1,tmp,u21i,u31i,u41i,u51i,i) for (i = ist; i <= L2; i += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21i = tmp * rsd[i][j][k][1]; u31i = tmp * rsd[i][j][k][2]; u41i = tmp * rsd[i][j][k][3]; u51i = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i - 1][j][k][0]; u21im1 = tmp * rsd[i - 1][j][k][1]; u31im1 = tmp * rsd[i - 1][j][k][2]; u41im1 = tmp * rsd[i - 1][j][k][3]; u51im1 = tmp * rsd[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5) for (i = ist; i <= iend; i += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * (rsd[i - 1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i + 1][j][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (rsd[i - 1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i + 1][j][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (rsd[i - 1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i + 1][j][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (rsd[i - 1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i + 1][j][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (rsd[i - 1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[1][j][k][m] = frct[1][j][k][m] - dsspm * (+5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m]); frct[2][j][k][m] = frct[2][j][k][m] - dsspm * (-4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; #pragma omp parallel for private (i,m) firstprivate (iend1) for (i = ist1; i <= iend1; i += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i - 2][j][k][m] - 4.0 * rsd[i - 1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i + 1][j][k][m] + rsd[i + 2][j][k][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[nx - 3][j][k][m] = frct[nx - 3][j][k][m] - dsspm * (rsd[nx - 5][j][k][m] - 4.0 * rsd[nx - 4][j][k][m] + 6.0 * rsd[nx - 3][j][k][m] - 4.0 * rsd[nx - 2][j][k][m]); frct[nx - 2][j][k][m] = frct[nx - 2][j][k][m] - dsspm * (rsd[nx - 
4][j][k][m] - 4.0 * rsd[nx - 3][j][k][m] + 5.0 * rsd[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u31,j,k) for (j = L1; j <= L2; j += 1) { #pragma omp parallel for private (q,u31,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = rsd[i][j][k][2]; u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u31; flux[i][j][k][2] = rsd[i][j][k][2] * u31 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][3] = rsd[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u31; } } } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,i,j,k,m) firstprivate (nz) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,jst1,jend1,tmp,u21j,u31j,u41j,u51j,j,k,m) firstprivate (ny,L2) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (ty2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } #pragma omp parallel for private (u21jm1,u31jm1,u41jm1,u51jm1,tmp,u21j,u31j,u41j,u51j,j) for (j = jst; j <= L2; j += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21j = tmp * rsd[i][j][k][1]; u31j = tmp * rsd[i][j][k][2]; u41j = tmp * rsd[i][j][k][3]; u51j = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j - 1][k][0]; u21jm1 = tmp * rsd[i][j - 1][k][1]; u31jm1 = tmp * rsd[i][j - 1][k][2]; u41jm1 = tmp * rsd[i][j - 1][k][3]; u51jm1 = tmp * rsd[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5) for (j = jst; j <= jend; j += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * (rsd[i][j - 1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j + 1][k][0]); frct[i][j][k][1] = frct[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (rsd[i][j - 1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j + 1][k][1]); frct[i][j][k][2] = frct[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (rsd[i][j - 1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j + 1][k][2]); frct[i][j][k][3] = frct[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (rsd[i][j - 1][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j + 1][k][3]); frct[i][j][k][4] = frct[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (rsd[i][j - 1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation 
--------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * (+5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m]); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * (-4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; #pragma omp parallel for private (j,m) firstprivate (jend1) for (j = jst1; j <= jend1; j += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j - 2][k][m] - 4.0 * rsd[i][j - 1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j + 1][k][m] + rsd[i][j + 2][k][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[i][ny - 3][k][m] = frct[i][ny - 3][k][m] - dsspm * (rsd[i][ny - 5][k][m] - 4.0 * rsd[i][ny - 4][k][m] + 6.0 * rsd[i][ny - 3][k][m] - 4.0 * rsd[i][ny - 2][k][m]); frct[i][ny - 2][k][m] = frct[i][ny - 2][k][m] - dsspm * (rsd[i][ny - 4][k][m] - 4.0 * rsd[i][ny - 3][k][m] + 5.0 * rsd[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,i,j,k,m) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,q,u41,tmp,u21k,j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u41,k) for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 * (rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3]) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + 0.40e+00 * (rsd[i][j][k][4] - q); flux[i][j][k][4] = (1.40e+00 * rsd[i][j][k][4] - 0.40e+00 * q) * u41; } #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tz2) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } #pragma omp parallel for private (u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,tmp,u21k,k) for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j][k - 1][0]; u21km1 = tmp * rsd[i][j][k - 1][1]; u31km1 = tmp * rsd[i][j][k - 1][2]; u41km1 = tmp * rsd[i][j][k - 1][3]; u51km1 = tmp * rsd[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } #pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5) for (k = 1; k <= nz - 2; k += 1) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * (rsd[i][j][k + 1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k - 1][0]); frct[i][j][k][1] = frct[i][j][k][1] + tz3 
* 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (rsd[i][j][k + 1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k - 1][1]); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (rsd[i][j][k + 1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k - 1][2]); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (rsd[i][j][k + 1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k - 1][3]); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (rsd[i][j][k + 1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k - 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * (+5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m]); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (-4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m]); } #pragma omp parallel for private (k,m) for (k = 3; k <= nz - 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * (rsd[i][j][k - 2][m] - 4.0 * rsd[i][j][k - 1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k + 1][m] + rsd[i][j][k + 2][m]); } } #pragma omp parallel for private (m) firstprivate (dsspm) for (m = 0; m <= 4; m += 1) { frct[i][j][nz - 3][m] = frct[i][j][nz - 3][m] - dsspm * (rsd[i][j][nz - 5][m] - 4.0 * rsd[i][j][nz - 4][m] + 6.0 * rsd[i][j][nz - 3][m] - 4.0 * rsd[i][j][nz - 2][m]); frct[i][j][nz - 2][m] = frct[i][j][nz - 2][m] - dsspm * (rsd[i][j][nz - 4][m] - 4.0 * rsd[i][j][nz - 3][m] + 5.0 * rsd[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error() { /*-------------------------------------------------------------------- c c compute the solution error c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; double tmp; double u000ijk[5]; #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { errnm[m] = 0.0; } for (i = ist; i <= iend; i += 1) { iglob = i; for (j = jst; j <= jend; j += 1) { jglob = j; for (k = 1; k <= nz - 2; k += 1) { exact(iglob, jglob, k, u000ijk); #pragma omp parallel for private (tmp,m) for (m = 0; m <= 4; m += 1) { tmp = u000ijk[m] - u[i][j][k][m]; errnm[m] = errnm[m] + tmp * tmp; } } } } for (m = 0; m <= 4; m += 1) { errnm[m] = sqrt(errnm[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2))); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact(int i, int j, int k, double u000ijk[5]) { /*-------------------------------------------------------------------- c c compute the exact solution at (i,j,k) c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int 
m;
  double xi;
  double eta;
  double zeta;
  xi = ((double)i) / (nx0 - 1);
  eta = ((double)j) / (ny0 - 1);
  zeta = ((double)k) / (nz - 1);
#pragma omp parallel for private (m) firstprivate (xi,eta,zeta)
  for (m = 0; m <= 4; m += 1) {
    u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacld(int k)
{
/*--------------------------------------------------------------------
c   compute the lower triangular part of the jacobian matrix
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i;
  int j;
  double r43;
  double c1345;
  double c34;
  double tmp1;
  double tmp2;
  double tmp3;
  r43 = 4.0 / 3.0;
  /* c1345 = C1*C3*C4*C5 and c34 = C3*C4, with C1 = C5 = 1.40, C3 = 0.10, C4 = 1.00 */
  c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
  c34 = 1.00e-01 * 1.00e+00;
#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend)
  for (i = ist; i <= iend; i += 1) {
#pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)
    for (j = jst; j <= jend; j += 1) {
/*--------------------------------------------------------------------
c   form the block diagonal
--------------------------------------------------------------------*/
      tmp1 = 1.0 / u[i][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
      d[i][j][0][1] = 0.0;
      d[i][j][0][2] = 0.0;
      d[i][j][0][3] = 0.0;
      d[i][j][0][4] = 0.0;
      d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1]));
      d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2);
      d[i][j][1][2] = 0.0;
      d[i][j][1][3] = 0.0;
      d[i][j][1][4] = 0.0;
      d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2]));
      d[i][j][2][1] = 0.0;
      d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3);
      d[i][j][2][3] = 0.0;
      d[i][j][2][4] = 0.0;
      d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3]));
      d[i][j][3][1] = 0.0;
      d[i][j][3][2] = 0.0;
      d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4);
      d[i][j][3][4] = 0.0;
      d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) *
tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k - 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tz1 * dz1; a[i][j][0][1] = 0.0; a[i][j][0][2] = 0.0; a[i][j][0][3] = -dt * tz2; a[i][j][0][4] = 0.0; a[i][j][1][0] = -dt * tz2 * (-(u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][1]); a[i][j][1][1] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; a[i][j][1][2] = 0.0; a[i][j][1][3] = -dt * tz2 * (u[i][j][k - 1][1] * tmp1); a[i][j][1][4] = 0.0; a[i][j][2][0] = -dt * tz2 * (-(u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k - 1][2]); a[i][j][2][1] = 0.0; a[i][j][2][2] = -dt * tz2 * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; a[i][j][2][3] = -dt * tz2 * (u[i][j][k - 1][2] * tmp1); a[i][j][2][4] = 0.0; a[i][j][3][0] = -dt * tz2 * (-(u[i][j][k - 1][3] * tmp1) * (u[i][j][k - 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k - 1][3]); a[i][j][3][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * tmp1)); a[i][j][3][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * tmp1)); a[i][j][3][3] = -dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k - 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; a[i][j][3][4] = -dt * tz2 * 0.40e+00; a[i][j][4][0] = -dt * tz2 * ((0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k - 1][4] * tmp1)) * (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k - 1][1] * u[i][j][k - 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k - 1][2] * u[i][j][k - 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k - 1][3] * u[i][j][k - 1][3]) - c1345 * tmp2 * u[i][j][k - 1][4]); a[i][j][4][1] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][1] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][1]; a[i][j][4][2] = -dt * tz2 * (-0.40e+00 * (u[i][j][k - 1][2] * u[i][j][k - 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k - 1][2]; a[i][j][4][3] = -dt * tz2 * (1.40e+00 * (u[i][j][k - 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k - 1][1] * u[i][j][k - 1][1] + u[i][j][k - 1][2] * u[i][j][k - 1][2] + 3.0 * u[i][j][k - 1][3] * u[i][j][k - 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k - 1][3]; a[i][j][4][4] = -dt * tz2 * (1.40e+00 
* (u[i][j][k - 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j - 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = -dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = -dt * ty2 * (-(u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][1]); b[i][j][1][1] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = -dt * ty2 * (u[i][j - 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * tmp1) * (u[i][j - 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j - 1][k][2]); b[i][j][2][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * tmp1)); b[i][j][2][2] = -dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][3] * tmp1)); b[i][j][2][4] = -dt * ty2 * 0.40e+00; b[i][j][3][0] = -dt * ty2 * (-(u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j - 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = -dt * ty2 * (u[i][j - 1][k][3] * tmp1); b[i][j][3][3] = -dt * ty2 * (u[i][j - 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = -dt * ty2 * ((0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][1] + u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j - 1][k][4] * tmp1)) * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j - 1][k][1] * u[i][j - 1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j - 1][k][2] * u[i][j - 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j - 1][k][3] * u[i][j - 1][k][3]) - c1345 * tmp2 * u[i][j - 1][k][4]); b[i][j][4][1] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][1] * u[i][j - 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][1]; b[i][j][4][2] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j - 1][k][1] * u[i][j - 1][k][1] + 3.0 * u[i][j - 1][k][2] * u[i][j - 1][k][2] + u[i][j - 1][k][3] * u[i][j - 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j - 1][k][2]; b[i][j][4][3] = -dt * ty2 * (-0.40e+00 * (u[i][j - 1][k][2] * u[i][j - 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j - 1][k][3]; b[i][j][4][4] = -dt * ty2 * (1.40e+00 * (u[i][j - 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i - 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tx1 * dx1; c[i][j][0][1] = -dt * tx2; c[i][j][0][2] = 0.0; c[i][j][0][3] = 0.0; c[i][j][0][4] = 0.0; c[i][j][1][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * tmp1) * (u[i - 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i - 1][j][k][1]); c[i][j][1][1] = -dt * tx2 * 
((2.0 - 0.40e+00) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2;
      c[i][j][1][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * tmp1));
      c[i][j][1][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * tmp1));
      c[i][j][1][4] = -dt * tx2 * 0.40e+00;
      c[i][j][2][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][2]);
      c[i][j][2][1] = -dt * tx2 * (u[i - 1][j][k][2] * tmp1);
      c[i][j][2][2] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3;
      c[i][j][2][3] = 0.0;
      c[i][j][2][4] = 0.0;
      c[i][j][3][0] = -dt * tx2 * (-(u[i - 1][j][k][1] * u[i - 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i - 1][j][k][3]);
      c[i][j][3][1] = -dt * tx2 * (u[i - 1][j][k][3] * tmp1);
      c[i][j][3][2] = 0.0;
      c[i][j][3][3] = -dt * tx2 * (u[i - 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4;
      c[i][j][3][4] = 0.0;
      c[i][j][4][0] = -dt * tx2 * ((0.40e+00 * (u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i - 1][j][k][4] * tmp1)) * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i - 1][j][k][1] * u[i - 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][2] * u[i - 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i - 1][j][k][3] * u[i - 1][j][k][3]) - c1345 * tmp2 * u[i - 1][j][k][4]);
      c[i][j][4][1] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i - 1][j][k][1] * u[i - 1][j][k][1] + u[i - 1][j][k][2] * u[i - 1][j][k][2] + u[i - 1][j][k][3] * u[i - 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i - 1][j][k][1];
      c[i][j][4][2] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][2] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][2];
      c[i][j][4][3] = -dt * tx2 * (-0.40e+00 * (u[i - 1][j][k][3] * u[i - 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i - 1][j][k][3];
      c[i][j][4][4] = -dt * tx2 * (1.40e+00 * (u[i - 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5;
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void jacu(int k)
{
/*--------------------------------------------------------------------
c   compute the upper triangular part of the jacobian matrix
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i;
  int j;
  double r43;
  double c1345;
  double c34;
  double tmp1;
  double tmp2;
  double tmp3;
  r43 = 4.0 / 3.0;
  c1345 = 1.40e+00 * 1.00e-01 * 1.00e+00 * 1.40e+00;
  c34 = 1.00e-01 * 1.00e+00;
  /* the (i,j) iterations only write a,b,c,d at (i,j), so the plane
     can be formed in parallel exactly as in jacld */
#pragma omp parallel for private (tmp1,tmp2,tmp3,i,j) firstprivate (iend,jst,jend)
  for (i = ist; i <= iend; i += 1) {
#pragma omp parallel for private (tmp1,tmp2,tmp3,j) firstprivate (k,r43,c1345,c34,tx1,tx2,ty1,ty2,tz1,tz2,dx1,dx2,dx3,dx4,dx5,dy1,dy2,dy3,dy4,dy5,dz1,dz2,dz3,dz4,dz5,dt)
    for (j = jst; j <= jend; j += 1) {
/*--------------------------------------------------------------------
c   form the block diagonal
--------------------------------------------------------------------*/
      tmp1 = 1.0 / u[i][j][k][0];
      tmp2 = tmp1 * tmp1;
      tmp3 = tmp1 * tmp2;
      d[i][j][0][0] = 1.0 + dt * 2.0 * (tx1 * dx1 + ty1 * dy1 + tz1 * dz1);
      d[i][j][0][1] = 0.0;
      d[i][j][0][2] = 0.0;
      d[i][j][0][3] = 0.0;
d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * (tx1 * (-r43 * c34 * tmp2 * u[i][j][k][1]) + ty1 * (-c34 * tmp2 * u[i][j][k][1]) + tz1 * (-c34 * tmp2 * u[i][j][k][1])); d[i][j][1][1] = 1.0 + dt * 2.0 * (tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx2 + ty1 * dy2 + tz1 * dz2); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][2]) + ty1 * (-r43 * c34 * tmp2 * u[i][j][k][2]) + tz1 * (-c34 * tmp2 * u[i][j][k][2])); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1) + dt * 2.0 * (tx1 * dx3 + ty1 * dy3 + tz1 * dz3); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * (tx1 * (-c34 * tmp2 * u[i][j][k][3]) + ty1 * (-c34 * tmp2 * u[i][j][k][3]) + tz1 * (-r43 * c34 * tmp2 * u[i][j][k][3])); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * (tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1) + dt * 2.0 * (tx1 * dx4 + ty1 * dy4 + tz1 * dz4); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * (tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + ty1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4]) + tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k][1] * u[i][j][k][1]) - (c34 - c1345) * tmp3 * (u[i][j][k][2] * u[i][j][k][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k][3] * u[i][j][k][3]) - c1345 * tmp2 * u[i][j][k][4])); d[i][j][4][1] = dt * 2.0 * (tx1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][1] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][1] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][1]); d[i][j][4][2] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][2] + ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][2] + tz1 * (c34 - c1345) * tmp2 * u[i][j][k][2]); d[i][j][4][3] = dt * 2.0 * (tx1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + ty1 * (c34 - c1345) * tmp2 * u[i][j][k][3] + tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k][3]); d[i][j][4][4] = 1.0 + dt * 2.0 * (tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1) + dt * 2.0 * (tx1 * dx5 + ty1 * dy5 + tz1 * dz5); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i + 1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = -dt * tx1 * dx1; a[i][j][0][1] = dt * tx2; a[i][j][0][2] = 0.0; a[i][j][0][3] = 0.0; a[i][j][0][4] = 0.0; a[i][j][1][0] = dt * tx2 * (-(u[i + 1][j][k][1] * tmp1) * (u[i + 1][j][k][1] * tmp1) + 0.40e+00 * 0.50 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-r43 * c34 * tmp2 * u[i + 1][j][k][1]); a[i][j][1][1] = dt * tx2 * ((2.0 - 0.40e+00) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (r43 * c34 * tmp1) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * tmp1)); a[i][j][1][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * tmp1)); a[i][j][1][4] = dt * tx2 * 0.40e+00; a[i][j][2][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][2]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][2]); a[i][j][2][1] = dt * tx2 * (u[i + 1][j][k][2] 
* tmp1); a[i][j][2][2] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * (-(u[i + 1][j][k][1] * u[i + 1][j][k][3]) * tmp2) - dt * tx1 * (-c34 * tmp2 * u[i + 1][j][k][3]); a[i][j][3][1] = dt * tx2 * (u[i + 1][j][k][3] * tmp1); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 * (u[i + 1][j][k][1] * tmp1) - dt * tx1 * (c34 * tmp1) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ((0.40e+00 * (u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2 - 1.40e+00 * (u[i + 1][j][k][4] * tmp1)) * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * (-(r43 * c34 - c1345) * tmp3 * (u[i + 1][j][k][1] * u[i + 1][j][k][1]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][2] * u[i + 1][j][k][2]) - (c34 - c1345) * tmp3 * (u[i + 1][j][k][3] * u[i + 1][j][k][3]) - c1345 * tmp2 * u[i + 1][j][k][4]); a[i][j][4][1] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][4] * tmp1) - 0.50 * 0.40e+00 * ((3.0 * u[i + 1][j][k][1] * u[i + 1][j][k][1] + u[i + 1][j][k][2] * u[i + 1][j][k][2] + u[i + 1][j][k][3] * u[i + 1][j][k][3]) * tmp2)) - dt * tx1 * (r43 * c34 - c1345) * tmp2 * u[i + 1][j][k][1]; a[i][j][4][2] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][2] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][2]; a[i][j][4][3] = dt * tx2 * (-0.40e+00 * (u[i + 1][j][k][3] * u[i + 1][j][k][1]) * tmp2) - dt * tx1 * (c34 - c1345) * tmp2 * u[i + 1][j][k][3]; a[i][j][4][4] = dt * tx2 * (1.40e+00 * (u[i + 1][j][k][1] * tmp1)) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j + 1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = -dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * (-(u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][1]); b[i][j][1][1] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * (u[i][j + 1][k][1] * tmp1); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * (-(u[i][j + 1][k][2] * tmp1) * (u[i][j + 1][k][2] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (-r43 * c34 * tmp2 * u[i][j + 1][k][2]); b[i][j][2][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * tmp1)); b[i][j][2][2] = dt * ty2 * ((2.0 - 0.40e+00) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (r43 * c34 * tmp1) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][3] * tmp1)); b[i][j][2][4] = dt * ty2 * 0.40e+00; b[i][j][3][0] = dt * ty2 * (-(u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (-c34 * tmp2 * u[i][j + 1][k][3]); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * (u[i][j + 1][k][3] * tmp1); b[i][j][3][3] = dt * ty2 * (u[i][j + 1][k][2] * tmp1) - dt * ty1 * (c34 * tmp1) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ((0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][1] + u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2 - 1.40e+00 * (u[i][j + 1][k][4] * tmp1)) * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * (-(c34 - c1345) * tmp3 * (u[i][j + 1][k][1] * u[i][j + 
1][k][1]) - (r43 * c34 - c1345) * tmp3 * (u[i][j + 1][k][2] * u[i][j + 1][k][2]) - (c34 - c1345) * tmp3 * (u[i][j + 1][k][3] * u[i][j + 1][k][3]) - c1345 * tmp2 * u[i][j + 1][k][4]); b[i][j][4][1] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][1] * u[i][j + 1][k][2]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][1]; b[i][j][4][2] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j + 1][k][1] * u[i][j + 1][k][1] + 3.0 * u[i][j + 1][k][2] * u[i][j + 1][k][2] + u[i][j + 1][k][3] * u[i][j + 1][k][3]) * tmp2)) - dt * ty1 * (r43 * c34 - c1345) * tmp2 * u[i][j + 1][k][2]; b[i][j][4][3] = dt * ty2 * (-0.40e+00 * (u[i][j + 1][k][2] * u[i][j + 1][k][3]) * tmp2) - dt * ty1 * (c34 - c1345) * tmp2 * u[i][j + 1][k][3]; b[i][j][4][4] = dt * ty2 * (1.40e+00 * (u[i][j + 1][k][2] * tmp1)) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k + 1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = -dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * (-(u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][1]); c[i][j][1][1] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * (u[i][j][k + 1][1] * tmp1); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * (-(u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (-c34 * tmp2 * u[i][j][k + 1][2]); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (c34 * tmp1) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * (u[i][j][k + 1][2] * tmp1); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * (-(u[i][j][k + 1][3] * tmp1) * (u[i][j][k + 1][3] * tmp1) + 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (-r43 * c34 * tmp2 * u[i][j][k + 1][3]); c[i][j][3][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * tmp1)); c[i][j][3][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * tmp1)); c[i][j][3][3] = dt * tz2 * (2.0 - 0.40e+00) * (u[i][j][k + 1][3] * tmp1) - dt * tz1 * (r43 * c34 * tmp1) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * 0.40e+00; c[i][j][4][0] = dt * tz2 * ((0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2 - 1.40e+00 * (u[i][j][k + 1][4] * tmp1)) * (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * (-(c34 - c1345) * tmp3 * (u[i][j][k + 1][1] * u[i][j][k + 1][1]) - (c34 - c1345) * tmp3 * (u[i][j][k + 1][2] * u[i][j][k + 1][2]) - (r43 * c34 - c1345) * tmp3 * (u[i][j][k + 1][3] * u[i][j][k + 1][3]) - c1345 * tmp2 * u[i][j][k + 1][4]); c[i][j][4][1] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][1] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][1]; c[i][j][4][2] = dt * tz2 * (-0.40e+00 * (u[i][j][k + 1][2] * u[i][j][k + 1][3]) * tmp2) - dt * tz1 * (c34 - c1345) * tmp2 * u[i][j][k + 1][2]; c[i][j][4][3] = dt * tz2 * (1.40e+00 * (u[i][j][k + 1][4] * tmp1) - 0.50 * 0.40e+00 * ((u[i][j][k + 1][1] * u[i][j][k + 1][1] + u[i][j][k + 1][2] * u[i][j][k + 1][2] + 3.0 * u[i][j][k + 1][3] * u[i][j][k + 1][3]) * tmp2)) - dt * tz1 * (r43 * c34 - c1345) * tmp2 * u[i][j][k + 1][3]; c[i][j][4][4] = dt * tz2 * (1.40e+00 
* (u[i][j][k + 1][3] * tmp1)) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5;
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void l2norm(int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only.  Only needed in v.
--------------------------------------------------------------------*/
                   double v[64][65][65][5], double sum[5])
{
  {
/*--------------------------------------------------------------------
c   to compute the l2-norm of vector v.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
    int i;
    int j;
    int k;
    int m;
    double sum0 = 0.0;
    double sum1 = 0.0;
    double sum2 = 0.0;
    double sum3 = 0.0;
    double sum4 = 0.0;
#pragma omp parallel for private (m)
    for (m = 0; m <= 4; m += 1) {
      sum[m] = 0.0;
    }
#pragma omp parallel for private (i,j,k) reduction (+:sum0,sum1,sum2,sum3,sum4) firstprivate (iend,jst,jend)
    for (i = ist; i <= iend; i += 1) {
#pragma omp parallel for private (j,k) reduction (+:sum0,sum1,sum2,sum3,sum4)
      for (j = jst; j <= jend; j += 1) {
#pragma omp parallel for private (k) reduction (+:sum0,sum1,sum2,sum3,sum4)
        for (k = 1; k <= nz0 - 2; k += 1) {
          sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
          sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
          sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
          sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
          sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
        }
      }
    }
    {
      sum[0] += sum0;
      sum[1] += sum1;
      sum[2] += sum2;
      sum[3] += sum3;
      sum[4] += sum4;
    }
    for (m = 0; m <= 4; m += 1) {
      sum[m] = sqrt(sum[m] / ((nx0 - 2) * (ny0 - 2) * (nz0 - 2)));
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void pintgr()
{
/*--------------------------------------------------------------------
c   local variables
--------------------------------------------------------------------*/
  int i;
  int j;
  int k;
  int ibeg;
  int ifin;
  int ifin1;
  int jbeg;
  int jfin;
  int jfin1;
  int iglob;
  int iglob1;
  int iglob2;
  int jglob;
  int jglob1;
  int jglob2;
/* phi1(0:isiz2+1,0:isiz3+1) */
  double phi1[66][66];
/* phi2(0:isiz2+1,0:isiz3+1) */
  double phi2[66][66];
  double frc1;
  double frc2;
  double frc3;
/*--------------------------------------------------------------------
c   set up the sub-domains for integration in each processor
--------------------------------------------------------------------*/
  ibeg = nx;
  ifin = 0;
  iglob1 = -1;
  iglob2 = nx - 1;
  if (iglob1 >= ii1 && iglob2 < ii2 + nx) ibeg = 0;
  if (iglob1 >= ii1 - nx && iglob2 <= ii2) ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
  jbeg = ny;
  jfin = -1;
  jglob1 = 0;
  jglob2 = ny - 1;
  if (jglob1 >= ji1 && jglob2 < ji2 + ny) jbeg = 0;
  if (jglob1 > ji1 - ny && jglob2 <= ji2) jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) ifin1 = ifin - 1;
  if (jfin1 == ji2) jfin1 = jfin - 1;
/*--------------------------------------------------------------------
c   initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private (i,k)
  for
(i = 0; i <= 65; i += 1) { #pragma omp parallel for private (k) for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } #pragma omp parallel for private (k,iglob,jglob,i,j) for (i = ibeg; i <= ifin; i += 1) { iglob = i; #pragma omp parallel for private (k,jglob,j) for (j = jbeg; j <= jfin; j += 1) { jglob = j; k = ki1; phi1[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]); k = ki2; phi2[i][j] = 0.40e+00 * (u[i][j][k][4] - 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]); } } frc1 = 0.0; #pragma omp parallel for private (i,j) reduction (+:frc1) for (i = ibeg; i <= ifin1; i += 1) { #pragma omp parallel for private (j) reduction (+:frc1) for (j = jbeg; j <= jfin1; j += 1) { frc1 = frc1 + (phi1[i][j] + phi1[i + 1][j] + phi1[i][j + 1] + phi1[i + 1][j + 1] + phi2[i][j] + phi2[i + 1][j] + phi2[i][j + 1] + phi2[i + 1][j + 1]); } } frc1 = dxi * deta * frc1; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ #pragma omp parallel for private (i,k) for (i = 0; i <= 65; i += 1) { #pragma omp parallel for private (k) for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } jglob = jbeg; if (jglob == ji1) { #pragma omp parallel for private (iglob,i,k) for (i = ibeg; i <= ifin; i += 1) { iglob = i; #pragma omp parallel for private (k) for (k = ki1; k <= ki2; k += 1) { phi1[i][k] = 0.40e+00 * (u[i][jbeg][k][4] - 0.50 * (u[i][jbeg][k][1] * u[i][jbeg][k][1] + u[i][jbeg][k][2] * u[i][jbeg][k][2] + u[i][jbeg][k][3] * u[i][jbeg][k][3]) / u[i][jbeg][k][0]); } } } jglob = jfin; if (jglob == ji2) { #pragma omp parallel for private (iglob,i,k) for (i = ibeg; i <= ifin; i += 1) { iglob = i; #pragma omp parallel for private (k) for (k = ki1; k <= ki2; k += 1) { phi2[i][k] = 0.40e+00 * (u[i][jfin][k][4] - 0.50 * (u[i][jfin][k][1] * u[i][jfin][k][1] + u[i][jfin][k][2] * u[i][jfin][k][2] + u[i][jfin][k][3] * u[i][jfin][k][3]) / u[i][jfin][k][0]); } } } frc2 = 0.0; #pragma omp parallel for private (i,k) reduction (+:frc2) firstprivate (ifin1) for (i = ibeg; i <= ifin1; i += 1) { #pragma omp parallel for private (k) reduction (+:frc2) for (k = ki1; k <= ki2 - 1; k += 1) { frc2 = frc2 + (phi1[i][k] + phi1[i + 1][k] + phi1[i][k + 1] + phi1[i + 1][k + 1] + phi2[i][k] + phi2[i + 1][k] + phi2[i][k + 1] + phi2[i + 1][k + 1]); } } frc2 = dxi * dzeta * frc2; /*-------------------------------------------------------------------- c initialize --------------------------------------------------------------------*/ #pragma omp parallel for private (i,k) for (i = 0; i <= 65; i += 1) { #pragma omp parallel for private (k) for (k = 0; k <= 65; k += 1) { phi1[i][k] = 0.0; phi2[i][k] = 0.0; } } iglob = ibeg; if (iglob == ii1) { #pragma omp parallel for private (jglob,j,k) for (j = jbeg; j <= jfin; j += 1) { jglob = j; #pragma omp parallel for private (k) firstprivate (ibeg) for (k = ki1; k <= ki2; k += 1) { phi1[j][k] = 0.40e+00 * (u[ibeg][j][k][4] - 0.50 * (u[ibeg][j][k][1] * u[ibeg][j][k][1] + u[ibeg][j][k][2] * u[ibeg][j][k][2] + u[ibeg][j][k][3] * u[ibeg][j][k][3]) / u[ibeg][j][k][0]); } } } iglob = ifin; if (iglob == ii2) { #pragma omp parallel for private (jglob,j,k) firstprivate (jfin) for (j = jbeg; j <= jfin; j += 1) { jglob = j; #pragma omp parallel for private (k) firstprivate (ifin) for (k = ki1; k <= ki2; k += 1) { 
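/*--------------------------------------------------------------------
c   phi2 on this face is the pressure 0.4*(rho*E - 0.5*|rho*(u,v,w)|^2/rho)
c   (gamma - 1 = 0.4) sampled at i = ifin, mirroring the phi1 fill at
c   i = ibeg above; frc3 below sums the four cell-corner values of
c   phi1 and phi2, and the final frc = 0.25 * (frc1 + frc2 + frc3)
c   supplies the 1/4 corner weight of the trapezoidal rule.
--------------------------------------------------------------------*/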
phi2[j][k] = 0.40e+00 * (u[ifin][j][k][4] - 0.50 * (u[ifin][j][k][1] * u[ifin][j][k][1] + u[ifin][j][k][2] * u[ifin][j][k][2] + u[ifin][j][k][3] * u[ifin][j][k][3]) / u[ifin][j][k][0]); } } } frc3 = 0.0; #pragma omp parallel for private (j,k) reduction (+:frc3) firstprivate (jfin1,ki1,ki2) for (j = jbeg; j <= jfin1; j += 1) { #pragma omp parallel for private (k) reduction (+:frc3) for (k = ki1; k <= ki2 - 1; k += 1) { frc3 = frc3 + (phi1[j][k] + phi1[j + 1][k] + phi1[j][k + 1] + phi1[j + 1][k + 1] + phi2[j][k] + phi2[j + 1][k] + phi2[j][k + 1] + phi2[j + 1][k + 1]); } } frc3 = deta * dzeta * frc3; frc = 0.25 * (frc1 + frc2 + frc3); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void read_input() { FILE *fp; /*-------------------------------------------------------------------- c if input file does not exist, it uses defaults c ipr = 1 for detailed progress output c inorm = how often the norm is printed (once every inorm iterations) c itmax = number of pseudo time steps c dt = time step c omega 1 over-relaxation factor for SSOR c tolrsd = steady state residual tolerance levels c nx, ny, nz = number of grid points in x, y, z directions --------------------------------------------------------------------*/ printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - LU Benchmark\n\n"); fp = fopen("inputlu.data", "r"); if (fp != ((void *)0)) { printf(" Reading from input file inputlu.data\n"); while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; fscanf(fp, "%d%d", &ipr, &inorm); while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; fscanf(fp, "%d", &itmax); while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; fscanf(fp, "%lf", &dt); while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; fscanf(fp, "%lf", &omega); while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; fscanf(fp, "%lf%lf%lf%lf%lf", &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]); while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; while (fgetc(fp) != '\n') ; fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0); while (fgetc(fp) != '\n') ; fclose(fp); } else { ipr = 1; inorm = 250; itmax = 250; dt = 2.0; omega = 1.2; tolrsd[0] = 1.0e-8; tolrsd[1] = 1.0e-8; tolrsd[2] = 1.0e-8; tolrsd[3] = 1.0e-8; tolrsd[4] = 1.0e-8; nx0 = 64; ny0 = 64; nz0 = 64; } /*-------------------------------------------------------------------- c check problem size --------------------------------------------------------------------*/ if (nx0 < 4 || ny0 < 4 || nz0 < 4) { printf(" PROBLEM SIZE IS TOO SMALL - \n SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n"); exit(1); } if (nx0 > 64 || ny0 > 64 || nz0 > 64) { printf(" PROBLEM SIZE IS TOO LARGE - \n NX, NY AND NZ SHOULD BE EQUAL TO \n ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n"); exit(1); } printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0); printf(" Iterations: %3d\n", itmax); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void rhs() { { /*-------------------------------------------------------------------- c compute the right hand sides --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int 
k; int m; int L1; int L2; int ist1; int iend1; int jst1; int jend1; double q; double u21; double u31; double u41; double tmp; double u21i; double u31i; double u41i; double u51i; double u21j; double u31j; double u41j; double u51j; double u21k; double u31k; double u41k; double u51k; double u21im1; double u31im1; double u41im1; double u51im1; double u21jm1; double u31jm1; double u41jm1; double u51jm1; double u21km1; double u31km1; double u41km1; double u51km1; #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= nx - 1; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= ny - 1; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= nz - 1; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = -frct[i][j][k][m]; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx - 1; #pragma omp parallel for private (q,u21,i,j,k) firstprivate (L2) for (i = L1; i <= L2; i += 1) { #pragma omp parallel for private (q,u21,j,k) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u21,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][1]; u21 = u[i][j][k][1] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u21 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][2] = u[i][j][k][2] * u21; flux[i][j][k][3] = u[i][j][k][3] * u21; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u21; } } } #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (L2,ist1,iend1,tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i,k,m) firstprivate (nx) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (i,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (m) firstprivate (tx2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tx2 * (flux[i + 1][j][k][m] - flux[i - 1][j][k][m]); } } L2 = nx - 1; #pragma omp parallel for private (tmp,u21i,u31i,u41i,u51i,u21im1,u31im1,u41im1,u51im1,i) firstprivate (L2) for (i = ist; i <= L2; i += 1) { tmp = 1.0 / u[i][j][k][0]; u21i = tmp * u[i][j][k][1]; u31i = tmp * u[i][j][k][2]; u41i = tmp * u[i][j][k][3]; u51i = tmp * u[i][j][k][4]; tmp = 1.0 / u[i - 1][j][k][0]; u21im1 = tmp * u[i - 1][j][k][1]; u31im1 = tmp * u[i - 1][j][k][2]; u41im1 = tmp * u[i - 1][j][k][3]; u51im1 = tmp * u[i - 1][j][k][4]; flux[i][j][k][1] = 4.0 / 3.0 * tx3 * (u21i - u21im1); flux[i][j][k][2] = tx3 * (u31i - u31im1); flux[i][j][k][3] = tx3 * (u41i - u41im1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tx3 * (u21i * u21i + u31i * u31i + u41i * u41i - (u21im1 * u21im1 + u31im1 * u31im1 + u41im1 * u41im1)) + 1.0 / 6.0 * tx3 * (u21i * u21i - u21im1 * u21im1) + 1.40e+00 * 1.40e+00 * tx3 * (u51i - u51im1); } #pragma omp parallel for private (i) firstprivate (tx1,tx3,dx1,dx2,dx3,dx4,dx5) for (i = ist; i <= iend; i += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dx1 * tx1 * (u[i - 1][j][k][0] - 2.0 * u[i][j][k][0] + u[i + 1][j][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][1] - flux[i][j][k][1]) + dx2 * tx1 * (u[i - 1][j][k][1] - 2.0 * u[i][j][k][1] + u[i + 1][j][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tx3 * 1.00e-01 * 
1.00e+00 * (flux[i + 1][j][k][2] - flux[i][j][k][2]) + dx3 * tx1 * (u[i - 1][j][k][2] - 2.0 * u[i][j][k][2] + u[i + 1][j][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][3] - flux[i][j][k][3]) + dx4 * tx1 * (u[i - 1][j][k][3] - 2.0 * u[i][j][k][3] + u[i + 1][j][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tx3 * 1.00e-01 * 1.00e+00 * (flux[i + 1][j][k][4] - flux[i][j][k][4]) + dx5 * tx1 * (u[i - 1][j][k][4] - 2.0 * u[i][j][k][4] + u[i + 1][j][k][4]); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[1][j][k][m] = rsd[1][j][k][m] - dssp * (+5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m]); rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * (-4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m]); } ist1 = 3; iend1 = nx - 4; #pragma omp parallel for private (i,m) firstprivate (iend1) for (i = ist1; i <= iend1; i += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i - 2][j][k][m] - 4.0 * u[i - 1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i + 1][j][k][m] + u[i + 2][j][k][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[nx - 3][j][k][m] = rsd[nx - 3][j][k][m] - dssp * (u[nx - 5][j][k][m] - 4.0 * u[nx - 4][j][k][m] + 6.0 * u[nx - 3][j][k][m] - 4.0 * u[nx - 2][j][k][m]); rsd[nx - 2][j][k][m] = rsd[nx - 2][j][k][m] - dssp * (u[nx - 4][j][k][m] - 4.0 * u[nx - 3][j][k][m] + 5.0 * u[nx - 2][j][k][m]); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny - 1; #pragma omp parallel for private (q,u31,i,j,k) firstprivate (L1,L2) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u31,j,k) for (j = L1; j <= L2; j += 1) { #pragma omp parallel for private (q,u31,k) for (k = 1; k <= nz - 2; k += 1) { flux[i][j][k][0] = u[i][j][k][2]; u31 = u[i][j][k][2] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u31; flux[i][j][k][2] = u[i][j][k][2] * u31 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][3] = u[i][j][k][3] * u31; flux[i][j][k][4] = (1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u31; } } } #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,i,j,k,m) firstprivate (nz) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (L2,jst1,jend1,tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j,k,m) firstprivate (ny) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (j,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (m) firstprivate (ty2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * (flux[i][j + 1][k][m] - flux[i][j - 1][k][m]); } } L2 = ny - 1; #pragma omp parallel for private (tmp,u21j,u31j,u41j,u51j,u21jm1,u31jm1,u41jm1,u51jm1,j) firstprivate (L2) for (j = jst; j <= L2; j += 1) { tmp = 1.0 / u[i][j][k][0]; u21j = tmp * u[i][j][k][1]; u31j = tmp * u[i][j][k][2]; u41j = tmp * u[i][j][k][3]; u51j = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j - 1][k][0]; u21jm1 = tmp * u[i][j - 1][k][1]; u31jm1 = tmp * u[i][j - 1][k][2]; u41jm1 = tmp * 
u[i][j - 1][k][3]; u51jm1 = tmp * u[i][j - 1][k][4]; flux[i][j][k][1] = ty3 * (u21j - u21jm1); flux[i][j][k][2] = 4.0 / 3.0 * ty3 * (u31j - u31jm1); flux[i][j][k][3] = ty3 * (u41j - u41jm1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * ty3 * (u21j * u21j + u31j * u31j + u41j * u41j - (u21jm1 * u21jm1 + u31jm1 * u31jm1 + u41jm1 * u41jm1)) + 1.0 / 6.0 * ty3 * (u31j * u31j - u31jm1 * u31jm1) + 1.40e+00 * 1.40e+00 * ty3 * (u51j - u51jm1); } #pragma omp parallel for private (j) firstprivate (ty1,ty3,dy1,dy2,dy3,dy4,dy5) for (j = jst; j <= jend; j += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * (u[i][j - 1][k][0] - 2.0 * u[i][j][k][0] + u[i][j + 1][k][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][1] - flux[i][j][k][1]) + dy2 * ty1 * (u[i][j - 1][k][1] - 2.0 * u[i][j][k][1] + u[i][j + 1][k][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][2] - flux[i][j][k][2]) + dy3 * ty1 * (u[i][j - 1][k][2] - 2.0 * u[i][j][k][2] + u[i][j + 1][k][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][3] - flux[i][j][k][3]) + dy4 * ty1 * (u[i][j - 1][k][3] - 2.0 * u[i][j][k][3] + u[i][j + 1][k][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * 1.00e-01 * 1.00e+00 * (flux[i][j + 1][k][4] - flux[i][j][k][4]) + dy5 * ty1 * (u[i][j - 1][k][4] - 2.0 * u[i][j][k][4] + u[i][j + 1][k][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * (+5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m]); rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * (-4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m]); } jst1 = 3; jend1 = ny - 4; #pragma omp parallel for private (j,m) firstprivate (jend1) for (j = jst1; j <= jend1; j += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j - 2][k][m] - 4.0 * u[i][j - 1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j + 1][k][m] + u[i][j + 2][k][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[i][ny - 3][k][m] = rsd[i][ny - 3][k][m] - dssp * (u[i][ny - 5][k][m] - 4.0 * u[i][ny - 4][k][m] + 6.0 * u[i][ny - 3][k][m] - 4.0 * u[i][ny - 2][k][m]); rsd[i][ny - 2][k][m] = rsd[i][ny - 2][k][m] - dssp * (u[i][ny - 4][k][m] - 4.0 * u[i][ny - 3][k][m] + 5.0 * u[i][ny - 2][k][m]); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,i,j,k,m) firstprivate (iend,jst,jend) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (q,u41,tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (q,u41,k) for (k = 0; k <= nz - 1; k += 1) { flux[i][j][k][0] = u[i][j][k][3]; u41 = u[i][j][k][3] / u[i][j][k][0]; q = 0.50 * (u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3]) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u41; flux[i][j][k][2] = u[i][j][k][2] * u41; flux[i][j][k][3] = u[i][j][k][3] * u41 + 0.40e+00 * (u[i][j][k][4] - q); flux[i][j][k][4] = 
(1.40e+00 * u[i][j][k][4] - 0.40e+00 * q) * u41; } #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tz2) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * (flux[i][j][k + 1][m] - flux[i][j][k - 1][m]); } } #pragma omp parallel for private (tmp,u21k,u31k,u41k,u51k,u21km1,u31km1,u41km1,u51km1,k) for (k = 1; k <= nz - 1; k += 1) { tmp = 1.0 / u[i][j][k][0]; u21k = tmp * u[i][j][k][1]; u31k = tmp * u[i][j][k][2]; u41k = tmp * u[i][j][k][3]; u51k = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j][k - 1][0]; u21km1 = tmp * u[i][j][k - 1][1]; u31km1 = tmp * u[i][j][k - 1][2]; u41km1 = tmp * u[i][j][k - 1][3]; u51km1 = tmp * u[i][j][k - 1][4]; flux[i][j][k][1] = tz3 * (u21k - u21km1); flux[i][j][k][2] = tz3 * (u31k - u31km1); flux[i][j][k][3] = 4.0 / 3.0 * tz3 * (u41k - u41km1); flux[i][j][k][4] = 0.50 * (1.0 - 1.40e+00 * 1.40e+00) * tz3 * (u21k * u21k + u31k * u31k + u41k * u41k - (u21km1 * u21km1 + u31km1 * u31km1 + u41km1 * u41km1)) + 1.0 / 6.0 * tz3 * (u41k * u41k - u41km1 * u41km1) + 1.40e+00 * 1.40e+00 * tz3 * (u51k - u51km1); } #pragma omp parallel for private (k) firstprivate (tz1,tz3,dz1,dz2,dz3,dz4,dz5) for (k = 1; k <= nz - 2; k += 1) { rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * (u[i][j][k - 1][0] - 2.0 * u[i][j][k][0] + u[i][j][k + 1][0]); rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][1] - flux[i][j][k][1]) + dz2 * tz1 * (u[i][j][k - 1][1] - 2.0 * u[i][j][k][1] + u[i][j][k + 1][1]); rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][2] - flux[i][j][k][2]) + dz3 * tz1 * (u[i][j][k - 1][2] - 2.0 * u[i][j][k][2] + u[i][j][k + 1][2]); rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][3] - flux[i][j][k][3]) + dz4 * tz1 * (u[i][j][k - 1][3] - 2.0 * u[i][j][k][3] + u[i][j][k + 1][3]); rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * 1.00e-01 * 1.00e+00 * (flux[i][j][k + 1][4] - flux[i][j][k][4]) + dz5 * tz1 * (u[i][j][k - 1][4] - 2.0 * u[i][j][k][4] + u[i][j][k + 1][4]); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * (+5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m]); rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * (-4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m]); } #pragma omp parallel for private (k,m) for (k = 3; k <= nz - 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * (u[i][j][k - 2][m] - 4.0 * u[i][j][k - 1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k + 1][m] + u[i][j][k + 2][m]); } } #pragma omp parallel for private (m) firstprivate (dssp) for (m = 0; m <= 4; m += 1) { rsd[i][j][nz - 3][m] = rsd[i][j][nz - 3][m] - dssp * (u[i][j][nz - 5][m] - 4.0 * u[i][j][nz - 4][m] + 6.0 * u[i][j][nz - 3][m] - 4.0 * u[i][j][nz - 2][m]); rsd[i][j][nz - 2][m] = rsd[i][j][nz - 2][m] - dssp * (u[i][j][nz - 4][m] - 4.0 * u[i][j][nz - 3][m] + 5.0 * u[i][j][nz - 2][m]); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setbv() { { /*-------------------------------------------------------------------- c set the boundary values of dependent variables 
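c
c   all six faces are loaded from the exact solution routine exact(),
c   so the boundary data coincide with the analytic solution that
c   error() later uses to measure the solution error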
--------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int iglob; int jglob; /*-------------------------------------------------------------------- c set the dependent variable values along the top and bottom faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (j = 0; j <= ny - 1; j += 1) { jglob = j; exact(iglob, jglob, 0, &u[i][j][0][0]); exact(iglob, jglob, nz - 1, &u[i][j][nz - 1][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along north and south faces --------------------------------------------------------------------*/ for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob, 0, k, &u[i][0][k][0]); } } for (i = 0; i <= nx - 1; i += 1) { iglob = i; for (k = 0; k <= nz - 1; k += 1) { exact(iglob, ny0 - 1, k, &u[i][ny - 1][k][0]); } } /*-------------------------------------------------------------------- c set the dependent variable values along east and west faces --------------------------------------------------------------------*/ for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(0, jglob, k, &u[0][j][k][0]); } } for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 0; k <= nz - 1; k += 1) { exact(nx0 - 1, jglob, k, &u[nx - 1][j][k][0]); } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setcoeff() { /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ dxi = 1.0 / (nx0 - 1); deta = 1.0 / (ny0 - 1); dzeta = 1.0 / (nz0 - 1); tx1 = 1.0 / (dxi * dxi); tx2 = 1.0 / (2.0 * dxi); tx3 = 1.0 / dxi; ty1 = 1.0 / (deta * deta); ty2 = 1.0 / (2.0 * deta); ty3 = 1.0 / deta; tz1 = 1.0 / (dzeta * dzeta); tz2 = 1.0 / (2.0 * dzeta); tz3 = 1.0 / dzeta; ii1 = 1; ii2 = nx0 - 2; ji1 = 1; ji2 = ny0 - 3; ki1 = 2; ki2 = nz0 - 2; /*-------------------------------------------------------------------- c diffusion coefficients --------------------------------------------------------------------*/ dx1 = 0.75; dx2 = dx1; dx3 = dx1; dx4 = dx1; dx5 = dx1; dy1 = 0.75; dy2 = dy1; dy3 = dy1; dy4 = dy1; dy5 = dy1; dz1 = 1.00; dz2 = dz1; dz3 = dz1; dz4 = dz1; dz5 = dz1; /*-------------------------------------------------------------------- c fourth difference dissipation --------------------------------------------------------------------*/ dssp = ((dx1 > ((dy1 > dz1 ? dy1 : dz1)) ? dx1 : ((dy1 > dz1 ? 
dy1 : dz1)))) / 4.0; /*-------------------------------------------------------------------- c coefficients of the exact solution to the first pde --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 5.0e-01; ce[0][7] = 2.0e-02; ce[0][8] = 1.0e-02; ce[0][9] = 3.0e-02; ce[0][10] = 5.0e-01; ce[0][11] = 4.0e-01; ce[0][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the second pde --------------------------------------------------------------------*/ ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 1.0e-02; ce[1][8] = 3.0e-02; ce[1][9] = 2.0e-02; ce[1][10] = 4.0e-01; ce[1][11] = 3.0e-01; ce[1][12] = 5.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the third pde --------------------------------------------------------------------*/ ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 4.0e-02; ce[2][8] = 3.0e-02; ce[2][9] = 5.0e-02; ce[2][10] = 3.0e-01; ce[2][11] = 5.0e-01; ce[2][12] = 4.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fourth pde --------------------------------------------------------------------*/ ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 3.0e-02; ce[3][8] = 5.0e-02; ce[3][9] = 4.0e-02; ce[3][10] = 2.0e-01; ce[3][11] = 1.0e-01; ce[3][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fifth pde --------------------------------------------------------------------*/ ce[4][0] = 5.0; ce[4][1] = 4.0; ce[4][2] = 3.0; ce[4][3] = 2.0; ce[4][4] = 1.0e-01; ce[4][5] = 4.0e-01; ce[4][6] = 3.0e-01; ce[4][7] = 5.0e-02; ce[4][8] = 4.0e-02; ce[4][9] = 3.0e-02; ce[4][10] = 1.0e-01; ce[4][11] = 3.0e-01; ce[4][12] = 2.0e-01; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setiv() { { /*-------------------------------------------------------------------- c c set the initial values of independent variables based on tri-linear c interpolation of boundary values in the computational space. 
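c
c   each interior value is the tri-linear blend of the six face
c   traces:  u = pxi + peta + pzeta - pxi*peta - peta*pzeta - pzeta*pxi
c   + pxi*peta*pzeta, where pxi, peta and pzeta are the 1-d linear
c   interpolants between opposite faces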
c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int iglob; int jglob; double xi; double eta; double zeta; double pxi; double peta; double pzeta; double ue_1jk[5]; double ue_nx0jk[5]; double ue_i1k[5]; double ue_iny0k[5]; double ue_ij1[5]; double ue_ijnz[5]; for (j = 0; j <= ny - 1; j += 1) { jglob = j; for (k = 1; k <= nz - 1 - 1; k += 1) { zeta = ((double)k) / (nz - 1); if (jglob != 0 && jglob != ny0 - 1) { eta = ((double)jglob) / (ny0 - 1); for (i = 0; i <= nx - 1; i += 1) { iglob = i; if (iglob != 0 && iglob != nx0 - 1) { xi = ((double)iglob) / (nx0 - 1); exact(0, jglob, k, ue_1jk); exact(nx0 - 1, jglob, k, ue_nx0jk); exact(iglob, 0, k, ue_i1k); exact(iglob, ny0 - 1, k, ue_iny0k); exact(iglob, jglob, 0, ue_ij1); exact(iglob, jglob, nz - 1, ue_ijnz); #pragma omp parallel for private (pxi,peta,pzeta,m) firstprivate (xi,eta,zeta) for (m = 0; m <= 4; m += 1) { pxi = (1.0 - xi) * ue_1jk[m] + xi * ue_nx0jk[m]; peta = (1.0 - eta) * ue_i1k[m] + eta * ue_iny0k[m]; pzeta = (1.0 - zeta) * ue_ij1[m] + zeta * ue_ijnz[m]; u[i][j][k][m] = pxi + peta + pzeta - pxi * peta - peta * pzeta - pzeta * pxi + pxi * peta * pzeta; } } } } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void ssor() { /*-------------------------------------------------------------------- c to perform pseudo-time stepping SSOR iterations c for five nonlinear pde s. --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i; int j; int k; int m; int istep; double tmp; double delunm[5]; double tv[64][64][5]; /*-------------------------------------------------------------------- c begin pseudo-time stepping iterations --------------------------------------------------------------------*/ tmp = 1.0 / (omega * (2.0 - omega)); /*-------------------------------------------------------------------- c initialize a,b,c,d to zero (guarantees that page tables have been c formed, if applicable on given architecture, before timestepping). 
--------------------------------------------------------------------*/ { #pragma omp parallel for private (i,j,k,m) for (i = 0; i <= 63; i += 1) { #pragma omp parallel for private (j,k,m) for (j = 0; j <= 63; j += 1) { #pragma omp parallel for private (k,m) for (k = 0; k <= 4; k += 1) { #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { a[i][j][k][m] = 0.0; b[i][j][k][m] = 0.0; c[i][j][k][m] = 0.0; d[i][j][k][m] = 0.0; } } } } } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration residuals --------------------------------------------------------------------*/ l2norm(nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm); timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c the timestep loop --------------------------------------------------------------------*/ for (istep = 1; istep <= itmax; istep += 1) { if (istep % 20 == 0 || istep == itmax || istep == 1) { printf(" Time step %4d\n", istep); } { /*-------------------------------------------------------------------- c perform SSOR iteration --------------------------------------------------------------------*/ #pragma omp parallel for private (i,j,k,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k,m) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (dt) for (m = 0; m <= 4; m += 1) { rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } for (k = 1; k <= nz - 2; k += 1) { /*-------------------------------------------------------------------- c form the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacld(k); /*-------------------------------------------------------------------- c perform the lower triangular solution --------------------------------------------------------------------*/ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0); } for (k = nz - 2; k >= 1; k += -1) { /*-------------------------------------------------------------------- c form the strictly upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacu(k); /*-------------------------------------------------------------------- c perform the upper triangular solution --------------------------------------------------------------------*/ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0); } /*-------------------------------------------------------------------- c update the variables --------------------------------------------------------------------*/ #pragma omp parallel for private (i,j,k,m) for (i = ist; i <= iend; i += 1) { #pragma omp parallel for private (j,k,m) firstprivate (nz) for (j = jst; j <= jend; j += 1) { #pragma omp parallel for private (k,m) for (k = 1; k <= nz - 2; k += 1) { #pragma omp parallel for private (m) firstprivate (tmp) for (m = 0; m <= 4; m += 1) { u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } /* end parallel */ } /*-------------------------------------------------------------------- c compute the max-norms of newton iteration corrections --------------------------------------------------------------------*/ if 
(istep % inorm == 0) { l2norm(nx0, ny0, nz0, ist, iend, jst, jend, rsd, delunm); } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the max-norms of newton iteration residuals --------------------------------------------------------------------*/ if (istep % inorm == 0 || istep == itmax) { l2norm(nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm); } /*-------------------------------------------------------------------- c check the newton-iteration residuals against the tolerance levels --------------------------------------------------------------------*/ if (rsdnm[0] < tolrsd[0] && rsdnm[1] < tolrsd[1] && rsdnm[2] < tolrsd[2] && rsdnm[3] < tolrsd[3] && rsdnm[4] < tolrsd[4]) { exit(1); } } timer_stop(1); maxtime = timer_read(1); } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void verify(double xcr[5], double xce[5], double xci, char *class, boolean * verified) { /*-------------------------------------------------------------------- c verification routine --------------------------------------------------------------------*/ double xcrref[5]; double xceref[5]; double xciref; double xcrdif[5]; double xcedif[5]; double xcidif; double epsilon; double dtref; int m; /*-------------------------------------------------------------------- c tolerance level --------------------------------------------------------------------*/ epsilon = 1.0e-08; *class = 'U'; *verified = 1; #pragma omp parallel for private (m) for (m = 0; m <= 4; m += 1) { xcrref[m] = 1.0; xceref[m] = 1.0; } xciref = 1.0; if (nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) { *class = 'S'; dtref = 5.0e-1; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xcrref[0] = 1.6196343210976702e-02; xcrref[1] = 2.1976745164821318e-03; xcrref[2] = 1.5179927653399185e-03; xcrref[3] = 1.5029584435994323e-03; xcrref[4] = 3.4264073155896461e-02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xceref[0] = 6.4223319957960924e-04; xceref[1] = 8.4144342047347926e-05; xceref[2] = 5.8588269616485186e-05; xceref[3] = 5.8474222595157350e-05; xceref[4] = 1.3103347914111294e-03; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (12X12X12) grid, c after 50 time steps, with DT = 5.0d-01 --------------------------------------------------------------------*/ xciref = 7.8418928865937083; } else if (nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) { /* SPEC95fp size */ *class = 'W'; dtref = 1.5e-3; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (33x33x33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xcrref[0] = 0.1236511638192e+02; xcrref[1] = 0.1317228477799e+01; xcrref[2] = 0.2550120713095e+01; xcrref[3] = 
0.2326187750252e+01; xcrref[4] = 0.2826799444189e+02; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (33X33X33) grid, --------------------------------------------------------------------*/ xceref[0] = 0.4867877144216; xceref[1] = 0.5064652880982e-01; xceref[2] = 0.9281818101960e-01; xceref[3] = 0.8570126542733e-01; xceref[4] = 0.1084277417792e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (33X33X33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xciref = 0.1161399311023e+02; } else if (nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) { *class = 'A'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 7.7902107606689367e+02; xcrref[1] = 6.3402765259692870e+01; xcrref[2] = 1.9499249727292479e+02; xcrref[3] = 1.7845301160418537e+02; xcrref[4] = 1.8384760349464247e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.9964085685471943e+01; xceref[1] = 2.8194576365003349; xceref[2] = 7.3473412698774742; xceref[3] = 6.7139225687777051; xceref[4] = 7.0715315688392578e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 2.6030925604886277e+01; } else if (nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) { *class = 'B'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 3.5532672969982736e+03; xcrref[1] = 2.6214750795310692e+02; xcrref[2] = 8.8333721850952190e+02; xcrref[3] = 7.7812774739425265e+02; xcrref[4] = 7.3087969592545314e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (102X102X102) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 1.1401176380212709e+02; xceref[1] = 8.1098963655421574; xceref[2] = 2.8480597317698308e+01; xceref[3] = 2.5905394567832939e+01; xceref[4] = 2.6054907504857413e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 4.7887162703308227e+01; } else if (nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) { *class = 'C'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 
--------------------------------------------------------------------*/ xcrref[0] = 1.03766980323537846e+04; xcrref[1] = 8.92212458801008552e+02; xcrref[2] = 2.56238814582660871e+03; xcrref[3] = 2.19194343857831427e+03; xcrref[4] = 1.78078057261061185e+04; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (162X162X162) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.15986399716949279e+02; xceref[1] = 1.55789559239863600e+01; xceref[2] = 5.41318863077207766e+01; xceref[3] = 4.82262643154045421e+01; xceref[4] = 4.55902910043250358e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 6.66404553572181300e+01; } else { *verified = 0; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 64X64X64 or 102X102X102 or 162X162X162 --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. --------------------------------------------------------------------*/ for (m = 0; m <= 4; m += 1) { xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]); xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]); } xcidif = fabs((xci - xciref) / xciref); /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. 
--------------------------------------------------------------------*/ if ((*class) != 'U') { printf("\n Verification being performed for class %1c\n", (*class)); printf(" Accuracy setting for epsilon = %20.13e\n", epsilon); if (fabs(dt - dtref) > epsilon) { *verified = 0; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n", dtref); } } else { printf(" Unknown class\n"); } if ((*class) != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m <= 4; m += 1) { if ((*class) == 'U') { printf(" %2d %20.13e\n", m, xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = 0; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n", m, xcr[m], xcrref[m], xcrdif[m]); } } if ((*class) != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m <= 4; m += 1) { if ((*class) == 'U') { printf(" %2d %20.13e\n", m, xce[m]); } else if (xcedif[m] > epsilon) { *verified = 0; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n", m, xce[m], xceref[m], xcedif[m]); } } if ((*class) != 'U') { printf(" Comparison of surface integral\n"); } else { printf(" Surface integral\n"); } if ((*class) == 'U') { printf(" %20.13e\n", xci); } else if (xcidif > epsilon) { *verified = 0; printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); } else { printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); } if ((*class) == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if (*verified) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } }
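The interior initialization in setiv() above is a transfinite (tri-linear) blend of the six boundary faces. Below is a minimal standalone sketch of the same inclusion-exclusion formula; the f_* arguments are hypothetical stand-ins for the exact() boundary values.

#include <stdio.h>

/* Minimal sketch of the tri-linear boundary blend used in setiv() above.
 * Each p* linearly interpolates one pair of opposite faces, and the
 * inclusion-exclusion combination fills the interior from the six face
 * values. The f_* arguments are hypothetical stand-ins for exact(). */
static double trilinear_blend(double xi, double eta, double zeta,
                              double f_x0, double f_x1,  /* west / east   */
                              double f_y0, double f_y1,  /* south / north */
                              double f_z0, double f_z1)  /* bottom / top  */
{
    double pxi   = (1.0 - xi)   * f_x0 + xi   * f_x1;
    double peta  = (1.0 - eta)  * f_y0 + eta  * f_y1;
    double pzeta = (1.0 - zeta) * f_z0 + zeta * f_z1;
    return pxi + peta + pzeta
           - pxi * peta - peta * pzeta - pzeta * pxi
           + pxi * peta * pzeta;
}

int main(void)
{
    /* constant face data is reproduced exactly everywhere in the cube */
    printf("%f\n", trilinear_blend(0.3, 0.6, 0.9,
                                   2.0, 2.0, 2.0, 2.0, 2.0, 2.0)); /* 2.0 */
    return 0;
}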
omp_parallel_for_if.c
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
    int known_sum;
    <ompts:orphan:vars>
    int num_threads;
    int sum, sum2;
    int i;
    int control;
    </ompts:orphan:vars>

    control = 0;
    num_threads = 0;
    sum = 0;
    sum2 = 0;

#pragma omp parallel for private(i) <ompts:check>if (control==1)</ompts:check>
    <ompts:orphan>
    for (i = 0; i <= LOOPCOUNT; i++)
    {
        num_threads = omp_get_num_threads();
        sum = sum + i;
    } /* end of for */
    </ompts:orphan>

    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    fprintf(logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
    return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
    int known_sum;
    <ompts:orphan:vars>
    int num_threads;
    int sum, sum2;
    int i;
    int control;
    </ompts:orphan:vars>

    control = 0;
    num_threads = 0;
    sum = 0;
    sum2 = 0;

    <ompts:orphan>
    for (i = 0; i <= LOOPCOUNT; i++)
    {
        num_threads = omp_get_num_threads();
        sum = sum + i;
    } /* end of for */
    </ompts:orphan>

    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    fprintf(logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
    return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
    int known_sum;
    <ompts:orphan:vars>
    int num_threads;
    int sum, sum2;
    int i;
    int control;
    </ompts:orphan:vars>

    control = 0;
    num_threads = 0;
    sum = 0;
    sum2 = 0;

#pragma omp parallel for private(i) <ompts:check>if (control==1)</ompts:check>
    <ompts:orphan>
    for (i = 0; i <= LOOPCOUNT; i++)
    {
        num_threads = omp_get_num_threads();
        sum = sum + i;
    } /* end of for */
    </ompts:orphan>

    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    fprintf(logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
    return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
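The variants above toggle the if clause via the <ompts:check> markup: when the clause evaluates to false, the parallel for region is executed by a team of exactly one thread, which is why the unsynchronized sum stays correct. A minimal standalone sketch of that semantics (a hypothetical N stands in for LOOPCOUNT):

#include <stdio.h>
#include <omp.h>

#define N 1000

int main(void)
{
    int control = 0, num_threads = 0, sum = 0;
    /* if(false): the region is serialized, so the team size is 1 and
     * the shared updates below cannot race */
    #pragma omp parallel for if (control == 1)
    for (int i = 0; i <= N; i++) {
        num_threads = omp_get_num_threads(); /* 1 when serialized */
        sum += i;                            /* safe only because of that */
    }
    printf("threads=%d sum=%d (expected %d)\n",
           num_threads, sum, (N * (N + 1)) / 2);
    return 0;
}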
parallel_residualbased_elimination_builder_and_solver.h
/* ============================================================================== Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ============================================================================== */ /* ********************************************************* * * Last Modified by: $Author: nelson $ * Date: $Date: 2008-12-04 17:12:56 $ * Revision: $Revision: 1.7 $ * * ***********************************************************/ #if !defined(KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER ) #define KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #include <omp.h> /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. Current class provides an implementation for standard builder and solving operations. the RHS is constituted by the unbalanced loads (residual) Degrees of freedom are reordered putting the restrained degrees of freedom at the end of the system ordered in reverse order with respect to the DofSet. Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. 
Calculation of the reactions involves a cost very similiar to the calculation of the total residual \URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template<class TSparseSpace, class TDenseSpace , //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ParallelResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > { public: /**@name Type Definitions */ /*@{ */ //typedef boost::shared_ptr< ParallelResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver> > Pointer; KRATOS_CLASS_POINTER_DEFINITION( ParallelResidualBasedEliminationBuilderAndSolver ); typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor. */ ParallelResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver) { /* std::cout << "using the standard builder and solver " << std::endl; */ } /** Destructor. 
*/ virtual ~ParallelResidualBasedEliminationBuilderAndSolver(){} /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY if(!pScheme) KRATOS_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); KRATOS_WATCH( number_of_threads ); KRATOS_WATCH( element_partition ); double start_prod = omp_get_wtime(); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k]; typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); #pragma omp critical { //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } } } vector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1]; // assemble all elements for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); #pragma omp critical { //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } } } double stop_prod = omp_get_wtime(); std::cout << "time: " << stop_prod - start_prod << std::endl; KRATOS_WATCH("finished parallel building"); /* LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); 
it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } */ //for( int i=0; i<A.size1(); i++ ) //{ // for( int j=0; j<A.size2(); j++ ) // { // std::cout << A(i,j); // } // std::cout << std::endl; //} KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS_CompleteOnFreeRows( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator 
it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SystemSolve( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY double start_solve = omp_get_wtime(); double norm_b; if(b.size() != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if(norm_b != 0.00) BaseType::mpLinearSystemSolver->Solve(A,Dx,b); else TSparseSpace::SetToZero(Dx); //prints informations about the current time if (BaseType::GetEchoLevel()>1) { std::cout << *(BaseType::mpLinearSystemSolver) << std::endl; } double stop_solve= omp_get_wtime(); std::cout << "time: " << stop_solve - start_solve << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY boost::timer building_time; Build(pScheme,r_model_part,A,b); if(BaseType::GetEchoLevel()>0) { std::cout << "Building Time : " << building_time.elapsed() << std::endl; } // ApplyPointLoads(pScheme,r_model_part,b); //does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme,r_model_part,A,Dx,b); if (BaseType::GetEchoLevel()== 3) { std::cout << "before the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } boost::timer solve_time; SystemSolve(A,Dx,b); if(BaseType::GetEchoLevel()>0) { std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl; } if (BaseType::GetEchoLevel()== 3) { std::cout << "after the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHSAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY BuildRHS(pScheme,r_model_part,b); SystemSolve(A,Dx,b); KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) { KRATOS_TRY //Getting the Elements ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); 
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part ) { KRATOS_TRY KRATOS_WATCH("setting up the dofs"); //Gets the array of elements from the modeler ElementsArrayType& pElements = r_model_part.Elements(); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); //mDofSet.clear(); //double StartTime = GetTickCount(); for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { // gets list of Dof involved on every element //aaa = GetTickCount(); pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo); //bbb += GetTickCount() - aaa; //ccc = GetTickCount(); for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i) { Doftemp.push_back(*i); //mDofSet.push_back(*i); } //ddd += GetTickCount() - ccc; } //std::cout << "searching " << bbb << std::endl; //std::cout << "inserting " << ddd << std::endl; //taking in account conditions ConditionsArrayType& pConditions = r_model_part.Conditions(); for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin(); it!=pConditions.ptr_end(); ++it) { // gets list of Dof involved on every element pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo); //ccc = GetTickCount(); for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i) { //mDofSet.push_back(*i); Doftemp.push_back(*i); } //ddd += GetTickCount() - ccc; } //std::cout << "searching " << bbb << std::endl; //std::cout << "inserting " << ddd << std::endl; //ccc = GetTickCount(); Doftemp.Unique(); BaseType::mDofSet = Doftemp; //ddd = GetTickCount() - ccc; //std::cout << "Unique " << ddd << std::endl; //throws an execption if there are no Degrees of freedom involved in the analysis if (BaseType::mDofSet.size()==0) KRATOS_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpSystem( ModelPart& r_model_part ) { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while 
the fixed one are at the end (in opposite order). // // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ElementsArrayType& rElements, ConditionsArrayType& rConditions, ProcessInfo& CurrentProcessInfo ) { KRATOS_TRY if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) ); pA.swap(pNewA); } if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) ); pDx.swap(pNewDx); } if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) ); pb.swap(pNewb); } if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) ); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false); ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo); } else { if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... 
this is SLOW"); A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true); ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo); } } if(Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize,false); if(b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize,false); // //if needed resize the vector for the calculation of reactions if(BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size()-BaseType::mEquationSystemSize; if(BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize,false); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void InitializeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void FinalizeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { //refresh RHS to have the correct reactions BuildRHS(pScheme,r_model_part,b); int i; int systemsize = BaseType::mDofSet.size() - BaseType::mpReactionsVector->size(); typename DofsArrayType::ptr_iterator it2; //std::set<Dof::Pointer,ComparePDof>::iterator it2; //updating variables TSystemVectorType& ReactionsVector = *(BaseType::mpReactionsVector); for (it2=BaseType::mDofSet.ptr_begin();it2 != BaseType::mDofSet.ptr_end(); ++it2) { if ( (*it2)->IsFixed() ) { i=(*it2)->EquationId(); i-=systemsize; (*it2)->GetSolutionStepReactionValue() = ReactionsVector[i]; } } } //************************************************************************** //************************************************************************** void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) {} //************************************************************************** //************************************************************************** void ApplyPointLoads( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) {} /** this function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() { this->mDofSet = DofsArrayType(); TSparseSpace::Clear( *(this->mpReactionsVector) ); // this->mReactionsVector = TSystemVectorType(); if (this->GetEchoLevel()>0) { KRATOS_WATCH("ParallelResidualBasedEliminationBuilderAndSolver Clear Function called"); } } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ //************************************************************************** virtual void 
ConstructMatrixStructure( TSystemMatrixType& A, ElementsContainerType& rElements, ConditionsArrayType& rConditions, ProcessInfo& CurrentProcessInfo) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); // std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix)); Element::EquationIdVectorType ids(3,0); for(typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++) { (i_element)->EquationIdVector(ids, CurrentProcessInfo); for(std::size_t i = 0 ; i < ids.size() ; i++) if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; j++) if(ids[j] < equation_size) { AddUnique(row_indices,ids[j]); //indices[ids[i]].push_back(ids[j]); } } } for(typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++) { (i_condition)->EquationIdVector(ids, CurrentProcessInfo); for(std::size_t i = 0 ; i < ids.size() ; i++) if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; j++) if(ids[j] < equation_size) { AddUnique(row_indices,ids[j]); // indices[ids[i]].push_back(ids[j]); } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; i++) { data_size += indices[i].size(); } A.reserve(data_size,false); //filling with zero the matrix (creating the structure) for(std::size_t i = 0 ; i < indices.size() ; i++) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i,*it,0.00); // A()(i,*it) = 0.00; } //row_indices = std::vector<std::size_t>(); row_indices.clear(); } } //************************************************************************** void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { unsigned int j_global=EquationId[j_local]; if ( j_global < BaseType::mEquationSystemSize ) { A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } } //************************************************************************** void AssembleRHS( TSystemVectorType& b, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag==false) //if we don't need to calculate reactions { for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } } } else //when the calculation of reactions is needed { TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } else //on "fixed" DOFs { // 
Assembling the Vector of REACTIONS ReactionsVector[i_global-BaseType::mEquationSystemSize] -= RHS_Contribution[i_local]; } } } } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { int j_global=EquationId[j_local]; A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } //****************************************************************************************** //****************************************************************************************** inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while ( i != endit && (*i) != candidate) { i++; } if( i == endit ) { v.push_back(candidate); } } //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class ParallelResidualBasedEliminationBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
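CreatePartition() above splits the element (or condition) range into one contiguous block per thread, with the integer-division remainder absorbed by the last block. A minimal C sketch of the same scheme, with hypothetical names:

#include <stdio.h>

/* Block partition as in CreatePartition() above: rows
 * [partition[k], partition[k+1]) belong to thread k, and the last
 * block takes the remainder of rows / threads. */
static void create_partition(unsigned int threads, unsigned int rows,
                             unsigned int *partition /* length threads+1 */)
{
    unsigned int chunk = rows / threads;
    partition[0] = 0;
    for (unsigned int i = 1; i < threads; ++i)
        partition[i] = partition[i - 1] + chunk;
    partition[threads] = rows;
}

int main(void)
{
    unsigned int p[5];
    create_partition(4, 10, p);             /* 10 rows over 4 threads */
    for (int k = 0; k < 4; ++k)
        printf("thread %d: rows [%u, %u)\n", k, p[k], p[k + 1]);
    /* blocks of 2, 2, 2, 4: the last thread absorbs the remainder */
    return 0;
}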
/* ********************************************************* * * Last Modified by: $Author: nelson $ * Date: $Date: 2008-12-04 17:12:56 $ * Revision: $Revision: 1.7 $ * * ***********************************************************/ #if !defined(KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER ) #define KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #include <omp.h> /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. Current class provides an implementation for standard builder and solving operations. the RHS is constituted by the unbalanced loads (residual) Degrees of freedom are reordered putting the restrained degrees of freedom at the end of the system ordered in reverse order with respect to the DofSet. Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. Calculation of the reactions involves a cost very similiar to the calculation of the total residual \URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template<class TSparseSpace, class TDenseSpace , //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ParallelResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > { public: /**@name Type Definitions */ /*@{ */ //typedef boost::shared_ptr< ParallelResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver> > Pointer; KRATOS_CLASS_POINTER_DEFINITION( ParallelResidualBasedEliminationBuilderAndSolver ); typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor. 
*/ ParallelResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver) { /* std::cout << "using the standard builder and solver " << std::endl; */ } /** Destructor. */ virtual ~ParallelResidualBasedEliminationBuilderAndSolver(){} /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY if(!pScheme) KRATOS_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); KRATOS_WATCH( number_of_threads ); KRATOS_WATCH( element_partition ); double start_prod = omp_get_wtime(); for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k]; typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } } vector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1]; // assemble all elements for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } } double stop_prod = omp_get_wtime(); std::cout << "time: " << stop_prod - start_prod << std::endl; KRATOS_WATCH("finished parallel building"); 
/* LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } */ //for( int i=0; i<A.size1(); i++ ) //{ // for( int j=0; j<A.size2(); j++ ) // { // std::cout << A(i,j); // } // std::cout << std::endl; //} KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS_CompleteOnFreeRows( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId); 
// clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SystemSolve( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY double start_solve = omp_get_wtime(); double norm_b; if(b.size() != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if(norm_b != 0.00) BaseType::mpLinearSystemSolver->Solve(A,Dx,b); else TSparseSpace::SetToZero(Dx); //prints informations about the current time if (BaseType::GetEchoLevel()>1) { std::cout << *(BaseType::mpLinearSystemSolver) << std::endl; } double stop_solve= omp_get_wtime(); std::cout << "time: " << stop_solve - start_solve << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY boost::timer building_time; Build(pScheme,r_model_part,A,b); if(BaseType::GetEchoLevel()>0) { std::cout << "Building Time : " << building_time.elapsed() << std::endl; } // ApplyPointLoads(pScheme,r_model_part,b); //does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme,r_model_part,A,Dx,b); if (BaseType::GetEchoLevel()== 3) { std::cout << "before the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } boost::timer solve_time; SystemSolve(A,Dx,b); if(BaseType::GetEchoLevel()>0) { std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl; } if (BaseType::GetEchoLevel()== 3) { std::cout << "after the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHSAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY BuildRHS(pScheme,r_model_part,b); SystemSolve(A,Dx,b); KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) { KRATOS_TRY //Getting the Elements ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions 
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part ) { KRATOS_TRY KRATOS_WATCH("setting up the dofs"); //Gets the array of elements from the modeler ElementsArrayType& pElements = r_model_part.Elements(); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); //mDofSet.clear(); //double StartTime = GetTickCount(); for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { // gets list of Dof involved on every element //aaa = GetTickCount(); pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo); //bbb += GetTickCount() - aaa; //ccc = GetTickCount(); for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i) { Doftemp.push_back(*i); //mDofSet.push_back(*i); } //ddd += GetTickCount() - ccc; } //std::cout << "searching " << bbb << std::endl; //std::cout << "inserting " << ddd << std::endl; //taking in account conditions ConditionsArrayType& pConditions = r_model_part.Conditions(); for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin(); it!=pConditions.ptr_end(); ++it) { // gets list of Dof involved on every element pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo); //ccc = GetTickCount(); for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i) { //mDofSet.push_back(*i); Doftemp.push_back(*i); } //ddd += GetTickCount() - ccc; } //std::cout << "searching " << bbb << std::endl; //std::cout << "inserting " << ddd << std::endl; //ccc = GetTickCount(); Doftemp.Unique(); BaseType::mDofSet = Doftemp; //ddd = GetTickCount() - ccc; //std::cout << "Unique " << ddd << std::endl; //throws an execption if there are no Degrees of freedom involved in the analysis if (BaseType::mDofSet.size()==0) KRATOS_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpSystem( 
ModelPart& r_model_part ) { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). // // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ElementsArrayType& rElements, ConditionsArrayType& rConditions, ProcessInfo& CurrentProcessInfo ) { KRATOS_TRY if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) ); pA.swap(pNewA); } if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) ); pDx.swap(pNewDx); } if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) ); pb.swap(pNewb); } if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) ); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false); ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo); } else { if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... 
this is SLOW"); A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true); ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo); } } if(Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize,false); if(b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize,false); // //if needed resize the vector for the calculation of reactions if(BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size()-BaseType::mEquationSystemSize; if(BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize,false); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void InitializeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void FinalizeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { //refresh RHS to have the correct reactions BuildRHS(pScheme,r_model_part,b); int i; int systemsize = BaseType::mDofSet.size() - BaseType::mpReactionsVector->size(); typename DofsArrayType::ptr_iterator it2; //std::set<Dof::Pointer,ComparePDof>::iterator it2; //updating variables TSystemVectorType& ReactionsVector = *(BaseType::mpReactionsVector); for (it2=BaseType::mDofSet.ptr_begin();it2 != BaseType::mDofSet.ptr_end(); ++it2) { if ( (*it2)->IsFixed() ) { i=(*it2)->EquationId(); i-=systemsize; (*it2)->GetSolutionStepReactionValue() = ReactionsVector[i]; } } } //************************************************************************** //************************************************************************** void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) {} //************************************************************************** //************************************************************************** void ApplyPointLoads( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) {} /** this function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() { this->mDofSet = DofsArrayType(); TSparseSpace::Clear( *(this->mpReactionsVector) ); // this->mReactionsVector = TSystemVectorType(); if (this->GetEchoLevel()>0) { KRATOS_WATCH("ParallelResidualBasedEliminationBuilderAndSolver Clear Function called"); } } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ //************************************************************************** virtual void 
ConstructMatrixStructure( TSystemMatrixType& A, ElementsContainerType& rElements, ConditionsArrayType& rConditions, ProcessInfo& CurrentProcessInfo) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); // std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix)); Element::EquationIdVectorType ids(3,0); for(typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++) { (i_element)->EquationIdVector(ids, CurrentProcessInfo); for(std::size_t i = 0 ; i < ids.size() ; i++) if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; j++) if(ids[j] < equation_size) { AddUnique(row_indices,ids[j]); //indices[ids[i]].push_back(ids[j]); } } } for(typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++) { (i_condition)->EquationIdVector(ids, CurrentProcessInfo); for(std::size_t i = 0 ; i < ids.size() ; i++) if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; j++) if(ids[j] < equation_size) { AddUnique(row_indices,ids[j]); // indices[ids[i]].push_back(ids[j]); } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; i++) { data_size += indices[i].size(); } A.reserve(data_size,false); //filling with zero the matrix (creating the structure) for(std::size_t i = 0 ; i < indices.size() ; i++) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i,*it,0.00); // A()(i,*it) = 0.00; } //row_indices = std::vector<std::size_t>(); row_indices.clear(); } } //************************************************************************** void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { unsigned int j_global=EquationId[j_local]; if ( j_global < BaseType::mEquationSystemSize ) { A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } } //************************************************************************** void AssembleRHS( TSystemVectorType& b, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag==false) //if we don't need to calculate reactions { for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } } } else //when the calculation of reactions is needed { TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } else //on "fixed" DOFs { // 
Assembling the Vector of REACTIONS ReactionsVector[i_global-BaseType::mEquationSystemSize] -= RHS_Contribution[i_local]; } } } } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { int j_global=EquationId[j_local]; A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } //****************************************************************************************** //****************************************************************************************** inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while ( i != endit && (*i) != candidate) { i++; } if( i == endit ) { v.push_back(candidate); } } //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class ParallelResidualBasedEliminationBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
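A minimal, self-contained sketch of the static chunking scheme implemented by CreatePartition() in the file above, assuming nothing beyond the standard library: N rows are split into nthreads contiguous chunks of floor(N / nthreads) rows each, and the remainder is absorbed by the last chunk because the final boundary is pinned to N. The names here (create_partition, nrows) are illustrative, not part of the Kratos API.

// Standalone demonstration of the partition layout used by the builder.
#include <cstdio>
#include <vector>

static void create_partition(unsigned int nthreads, int nrows,
                             std::vector<unsigned int>& partitions)
{
    partitions.resize(nthreads + 1);
    int chunk = nrows / nthreads;          // even chunk size, rounded down
    partitions[0] = 0;
    partitions[nthreads] = nrows;          // last chunk takes the remainder
    for (unsigned int i = 1; i < nthreads; ++i)
        partitions[i] = partitions[i - 1] + chunk;
}

int main()
{
    std::vector<unsigned int> p;
    create_partition(4, 10, p);            // prints: 0 2 4 6 10
    for (unsigned int boundary : p)
        std::printf("%u ", boundary);
    std::printf("\n");
    return 0;
}

Thread k then iterates elements in [partitions[k], partitions[k+1]), so the chunks tile the element array without overlap; with 4 threads and 10 rows the last thread gets the 4 leftover rows.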
/* ********************************************************* * * Last Modified by: $Author: nelson $ * Date: $Date: 2008-12-04 17:12:56 $ * Revision: $Revision: 1.7 $ * * ***********************************************************/ #if !defined(KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER ) #define KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER /* System includes */ #include <set> #include <omp.h> /* External includes */ #include "boost/smart_ptr.hpp" /* Project includes */ #include "includes/define.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. Current class provides an implementation for standard builder and solving operations. the RHS is constituted by the unbalanced loads (residual) Degrees of freedom are reordered putting the restrained degrees of freedom at the end of the system ordered in reverse order with respect to the DofSet. Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information. Calculation of the reactions involves a cost very similiar to the calculation of the total residual \URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template<class TSparseSpace, class TDenseSpace , //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ParallelResidualBasedEliminationBuilderAndSolver : public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > { public: /**@name Type Definitions */ /*@{ */ //typedef boost::shared_ptr< ParallelResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver> > Pointer; KRATOS_CLASS_POINTER_DEFINITION( ParallelResidualBasedEliminationBuilderAndSolver ); typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor. 
*/ ParallelResidualBasedEliminationBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver) { /* std::cout << "using the standard builder and solver " << std::endl; */ } /** Destructor. */ virtual ~ParallelResidualBasedEliminationBuilderAndSolver(){} /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY if(!pScheme) KRATOS_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); KRATOS_WATCH( number_of_threads ); KRATOS_WATCH( element_partition ); double start_prod = omp_get_wtime(); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k]; typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); #pragma omp critical { //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } } } vector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); #pragma omp parallel for for(int k=0; k<number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1]; // assemble all elements for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); #pragma omp critical { //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } } } double stop_prod = omp_get_wtime(); 
std::cout << "time: " << stop_prod - start_prod << std::endl; KRATOS_WATCH("finished parallel building"); /* LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } */ //for( int i=0; i<A.size1(); i++ ) //{ // for( int j=0; j<A.size2(); j++ ) // { // std::cout << A(i,j); // } // std::cout << std::endl; //} KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS_CompleteOnFreeRows( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution 
pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SystemSolve( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY double start_solve = omp_get_wtime(); double norm_b; if(b.size() != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if(norm_b != 0.00) BaseType::mpLinearSystemSolver->Solve(A,Dx,b); else TSparseSpace::SetToZero(Dx); //prints informations about the current time if (BaseType::GetEchoLevel()>1) { std::cout << *(BaseType::mpLinearSystemSolver) << std::endl; } double stop_solve= omp_get_wtime(); std::cout << "time: " << stop_solve - start_solve << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY boost::timer building_time; Build(pScheme,r_model_part,A,b); if(BaseType::GetEchoLevel()>0) { std::cout << "Building Time : " << building_time.elapsed() << std::endl; } // ApplyPointLoads(pScheme,r_model_part,b); //does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme,r_model_part,A,Dx,b); if (BaseType::GetEchoLevel()== 3) { std::cout << "before the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } boost::timer solve_time; SystemSolve(A,Dx,b); if(BaseType::GetEchoLevel()>0) { std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl; } if (BaseType::GetEchoLevel()== 3) { std::cout << "after the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHSAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY BuildRHS(pScheme,r_model_part,b); SystemSolve(A,Dx,b); KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) { KRATOS_TRY //Getting the Elements ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the 
conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part ) { KRATOS_TRY KRATOS_WATCH("setting up the dofs"); //Gets the array of elements from the modeler ElementsArrayType& pElements = r_model_part.Elements(); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); //mDofSet.clear(); //double StartTime = GetTickCount(); for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { // gets list of Dof involved on every element //aaa = GetTickCount(); pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo); //bbb += GetTickCount() - aaa; //ccc = GetTickCount(); for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i) { Doftemp.push_back(*i); //mDofSet.push_back(*i); } //ddd += GetTickCount() - ccc; } //std::cout << "searching " << bbb << std::endl; //std::cout << "inserting " << ddd << std::endl; //taking in account conditions ConditionsArrayType& pConditions = r_model_part.Conditions(); for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin(); it!=pConditions.ptr_end(); ++it) { // gets list of Dof involved on every element pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo); //ccc = GetTickCount(); for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i) { //mDofSet.push_back(*i); Doftemp.push_back(*i); } //ddd += GetTickCount() - ccc; } //std::cout << "searching " << bbb << std::endl; //std::cout << "inserting " << ddd << std::endl; //ccc = GetTickCount(); Doftemp.Unique(); BaseType::mDofSet = Doftemp; //ddd = GetTickCount() - ccc; //std::cout << "Unique " << ddd << std::endl; //throws an execption if there are no Degrees of freedom involved in the analysis if (BaseType::mDofSet.size()==0) KRATOS_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; 
KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void SetUpSystem( ModelPart& r_model_part ) { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). // // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ElementsArrayType& rElements, ConditionsArrayType& rConditions, ProcessInfo& CurrentProcessInfo ) { KRATOS_TRY if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) ); pA.swap(pNewA); } if(pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) ); pDx.swap(pNewDx); } if(pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) ); pb.swap(pNewb); } if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) ); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false); ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo); } else { if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... 
this is SLOW"); A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true); ConstructMatrixStructure(A,rElements,rConditions,CurrentProcessInfo); } } if(Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize,false); if(b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize,false); // //if needed resize the vector for the calculation of reactions if(BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size()-BaseType::mEquationSystemSize; if(BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize,false); } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void InitializeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void FinalizeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { //refresh RHS to have the correct reactions BuildRHS(pScheme,r_model_part,b); int i; int systemsize = BaseType::mDofSet.size() - BaseType::mpReactionsVector->size(); typename DofsArrayType::ptr_iterator it2; //std::set<Dof::Pointer,ComparePDof>::iterator it2; //updating variables TSystemVectorType& ReactionsVector = *(BaseType::mpReactionsVector); for (it2=BaseType::mDofSet.ptr_begin();it2 != BaseType::mDofSet.ptr_end(); ++it2) { if ( (*it2)->IsFixed() ) { i=(*it2)->EquationId(); i-=systemsize; (*it2)->GetSolutionStepReactionValue() = ReactionsVector[i]; } } } //************************************************************************** //************************************************************************** void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) {} //************************************************************************** //************************************************************************** void ApplyPointLoads( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) {} /** this function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() { this->mDofSet = DofsArrayType(); TSparseSpace::Clear( *(this->mpReactionsVector) ); // this->mReactionsVector = TSystemVectorType(); if (this->GetEchoLevel()>0) { KRATOS_WATCH("ParallelResidualBasedEliminationBuilderAndSolver Clear Function called"); } } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ //************************************************************************** virtual void 
ConstructMatrixStructure( TSystemMatrixType& A, ElementsContainerType& rElements, ConditionsArrayType& rConditions, ProcessInfo& CurrentProcessInfo) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); // std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix)); Element::EquationIdVectorType ids(3,0); for(typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++) { (i_element)->EquationIdVector(ids, CurrentProcessInfo); for(std::size_t i = 0 ; i < ids.size() ; i++) if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; j++) if(ids[j] < equation_size) { AddUnique(row_indices,ids[j]); //indices[ids[i]].push_back(ids[j]); } } } for(typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++) { (i_condition)->EquationIdVector(ids, CurrentProcessInfo); for(std::size_t i = 0 ; i < ids.size() ; i++) if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; j++) if(ids[j] < equation_size) { AddUnique(row_indices,ids[j]); // indices[ids[i]].push_back(ids[j]); } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; i++) { data_size += indices[i].size(); } A.reserve(data_size,false); //filling with zero the matrix (creating the structure) for(std::size_t i = 0 ; i < indices.size() ; i++) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i,*it,0.00); // A()(i,*it) = 0.00; } //row_indices = std::vector<std::size_t>(); row_indices.clear(); } } //************************************************************************** void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { unsigned int j_global=EquationId[j_local]; if ( j_global < BaseType::mEquationSystemSize ) { A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } } //************************************************************************** void AssembleRHS( TSystemVectorType& b, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag==false) //if we don't need to calculate reactions { for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } } } else //when the calculation of reactions is needed { TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } else //on "fixed" DOFs { // 
Assembling the Vector of REACTIONS ReactionsVector[i_global-BaseType::mEquationSystemSize] -= RHS_Contribution[i_local]; } } } } /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { int j_global=EquationId[j_local]; A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } //****************************************************************************************** //****************************************************************************************** inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while ( i != endit && (*i) != candidate) { i++; } if( i == endit ) { v.push_back(candidate); } } //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads+1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for(int i = 1; i<number_of_threads; i++) partitions[i] = partitions[i-1] + partition_size ; } /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class ParallelResidualBasedEliminationBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_PARALLEL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
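A minimal sketch (illustrative, not Kratos code) of the pattern the OpenMP variant above adds to Build(): one `parallel for` iteration per thread partition, thread-local contribution computation, and an `omp critical` section around the scatter into the shared system so concurrent writes to the same entries cannot race. Here `b` stands in for the system RHS and the element work is a dummy contribution.

#include <cstdio>
#include <omp.h>
#include <vector>

int main()
{
    const int n_elements = 1000;
    std::vector<double> b(8, 0.0);              // shared "system vector"
    int nthreads = omp_get_max_threads();
    std::vector<int> part(nthreads + 1);
    for (int i = 0; i <= nthreads; ++i)         // even partition of [0, n)
        part[i] = (int)((long long)n_elements * i / nthreads);

#pragma omp parallel for
    for (int k = 0; k < nthreads; ++k) {
        for (int e = part[k]; e < part[k + 1]; ++e) {
            double contribution = 1.0;          // stands in for the elemental
                                                // CalculateSystemContributions
#pragma omp critical
            b[e % 8] += contribution;           // serialized assembly step
        }
    }
    for (double v : b)
        std::printf("%g ", v);                  // each entry: 1000 / 8 = 125
    std::printf("\n");
    return 0;
}

Because the critical section wraps every assembly operation, only the contribution computation itself runs in parallel; with real element work that is where the speedup comes from, while correctness of the shared matrix and vector updates is preserved.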
atomic-6.c
/* PR middle-end/36106 */
/* { dg-options "-O2" } */
/* { dg-options "-O2 -mieee" { target alpha*-*-* } } */
/* { dg-options "-O2 -march=i586" { target { { i?86-*-* x86_64-*-* } && ilp32 } } } */

#ifdef __i386__
# include "cpuid.h"
#endif

extern void abort (void);

union { unsigned long long l; double d; } u = { .l = 0x7ff0000000072301ULL };

int __attribute__((noinline))
do_test (void)
{
#pragma omp atomic
  u.d += 1.0L;
  return 0;
}

int
main (void)
{
#ifdef __i386__
  unsigned int eax, ebx, ecx, edx;

  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return 0;

  if (!(edx & bit_CMPXCHG8B))
    return 0;
#endif
  do_test ();
  return 0;
}
/* PR middle-end/36106 */
/* { dg-options "-O2" } */
/* { dg-options "-O2 -mieee" { target alpha*-*-* } } */
/* { dg-options "-O2 -march=i586" { target { { i?86-*-* x86_64-*-* } && ilp32 } } } */

#ifdef __i386__
#include "cpuid.h"
#endif

extern void abort(void);

union {
  unsigned long long l;
  double d;
} u = { .l = 0x7ff0000000072301ULL };

int __attribute__((noinline)) do_test(void) {
  u.d += 1.0L;
  return 0;
}

int main(void) {
#ifdef __i386__
  unsigned int eax, ebx, ecx, edx;

  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 0;

  if (!(edx & bit_CMPXCHG8B))
    return 0;
#endif
  do_test();
  return 0;
}
/* PR middle-end/36106 */
/* { dg-options "-O2" } */
/* { dg-options "-O2 -mieee" { target alpha*-*-* } } */
/* { dg-options "-O2 -march=i586" { target { { i?86-*-* x86_64-*-* } && ilp32 } } } */

#ifdef __i386__
#include "cpuid.h"
#endif

extern void abort(void);

union {
  unsigned long long l;
  double d;
} u = { .l = 0x7ff0000000072301ULL };

int __attribute__((noinline)) do_test(void) {
#pragma omp atomic
  u.d += 1.0L;
  return 0;
}

int main(void) {
#ifdef __i386__
  unsigned int eax, ebx, ecx, edx;

  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 0;

  if (!(edx & bit_CMPXCHG8B))
    return 0;
#endif
  do_test();
  return 0;
}
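A minimal sketch (not part of the GCC testsuite) contrasting the two OpenMP guards appearing in this document: `omp atomic`, which the testcase above exercises on a 64-bit double (hence its cpuid check for cmpxchg8b on 32-bit x86, where a 64-bit read-modify-write needs that instruction), and `omp critical`, which the Kratos builder uses around whole assembly blocks. Both serialize the update; both sums below come out to 500500.

#include <cstdio>
#include <omp.h>

int main()
{
    double atomic_sum = 0.0, critical_sum = 0.0;
#pragma omp parallel for
    for (int i = 1; i <= 1000; ++i) {
#pragma omp atomic
        atomic_sum += (double)i;        // lowered to a hardware RMW where possible
#pragma omp critical
        critical_sum += (double)i;      // guarded by a runtime lock
    }
    std::printf("%g %g\n", atomic_sum, critical_sum);
    return 0;
}

As a rule of thumb, `atomic` is preferable for a single scalar update like this one, while `critical` is the right tool when the protected region spans several statements, as in the matrix assembly above.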
BAMarginals.h
/* +----------------------------------+ | | | *** BA Marginals *** | | | | Copyright (c) -tHE SWINe- 2016 | | | | BAMarginals.h | | | +----------------------------------+ */ #pragma once #ifndef __BA_MARGINALS_INCLUDED #define __BA_MARGINALS_INCLUDED /** * @file include/slam/BAMarginals.h * @brief helper classes for covariance recovery in Schur-complemented systems * @author -tHE SWINe- * @date 2016-02-15 */ #include "slam/Marginals.h" #include "slam/MemUsage.h" // PRIsizeB /** * @def __BA_MARGS_DO_MEMORY_PROFILING * @brief if defined, memory use of the marginals will be profiled */ //#define __BA_MARGS_DO_MEMORY_PROFILING /** * @brief internal functions for Schur complement marginals */ namespace sc_margs_detail { // todo - use wrap3 below /** * @brief multiply add operation with a sparse matrix and a block vector */ class CBlockVectorMAD_Impl { public: struct TInnerContext { double *p_dest; const double *p_block; inline TInnerContext(double *_p_dest, const double *_p_block) :p_dest(_p_dest), p_block(_p_block) {} }; template <const int n_row_height, class CColumnWidth> class CInnerLoop { // not dependent on CBlockSizeList; don't make it an inner class of COuterLoop public: enum { n_column_width = CColumnWidth::n_size }; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_column_width, n_column_width>::_Ty _TyDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_row_height, n_column_width>::_TyConst _TyBlockMap; public: static inline void Do(TInnerContext t_ctx) { _TyBlockMap fbs_block(t_ctx.p_block); _TyDestMap(t_ctx.p_dest) += fbs_block.transpose().lazyProduct(fbs_block); // mad } }; struct TOuterContext { double *p_dest; const CUberBlockMatrix &r_block_vector; inline TOuterContext(double *_p_dest, const CUberBlockMatrix &_r_block_vector) :p_dest(_p_dest), r_block_vector(_r_block_vector) {} }; template <const int n_column_width, class CBlockSizeList> class COuterLoop { public: static inline void Do(TOuterContext t_ctx) { _ASSERTE(n_column_width == t_ctx.r_block_vector.n_BlockColumn_Column_Num(0)); for(size_t i = 0, n = t_ctx.r_block_vector.n_BlockColumn_Block_Num(0); i < n; ++ i) { CUberBlockMatrix::_TyMatrixXdRef t_block = const_cast<CUberBlockMatrix&>(t_ctx.r_block_vector).t_Block_AtColumn(0, i); // hack - need to cast, const_ref does not expose its pointer to data size_t n_row_height = t_block.rows(); _ASSERTE(n_column_width == t_block.cols()); fbs_ut::CWrap2<CInnerLoop, fbs_ut::CCTSize<n_column_width> >::template In_RowHeight_DecisionTree_Given_ColumnWidth<CBlockSizeList, n_column_width>(int(n_row_height), TInnerContext(t_ctx.p_dest, t_block.data()/*&t_block(0, 0)*/)); // mad } // for each nnz block } }; /*public: template <class CBlockSizeList> static void BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(double *p_dest, const CUberBlockMatrix &r_block_vector) // g++ is unable to reference TOuterLoop from the outside for some reason { fbs_ut::CWrap2<COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>( r_block_vector.n_Column_Num(), TOuterContext(p_dest, r_block_vector)); // decide over vector width }*/ }; template <class CBlockSizeList, class CDestMatrix> inline void BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(CDestMatrix &r_dest, const CUberBlockMatrix &r_block_vector) { _ASSERTE(r_block_vector.n_BlockColumn_Num() == 1); // it is a block column-vector _ASSERTE(r_block_vector.n_Column_Num() == r_dest.rows()); // the number of columns in the block vector matches the size of the destination _ASSERTE(r_dest.rows() == r_dest.cols()); // the 
output is square _ASSERTE(!(CDestMatrix::Flags & Eigen::RowMajor)); // must be column-major otherwise the conversion to pointer strips data //_ASSERTE(CDestMatrix::MaxColsAtCompileTime == Eigen::Dynamic || // CDestMatrix::MaxColsAtCompileTime == r_dest.cols() || // r_dest.cols() == 1); // the stride must be tight or there is a single col and it does not matter _ASSERTE(r_dest.cols() <= 1 || &r_dest(0, 1) == &r_dest(0, 0) + r_dest.rows()); // the stride must be tight or there is a single col and it does not matter // do not zero r_dest //CBlockVectorMAD_Impl::template BlockVector_PreMultiplyWithSelfTranspose_Add_FBS<CBlockSizeList>(r_dest.data(), r_block_vector); fbs_ut::CWrap2<CBlockVectorMAD_Impl::COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>(int(r_block_vector.n_Column_Num()), CBlockVectorMAD_Impl::TOuterContext(r_dest.data(), r_block_vector)); // decide over vector width } inline void Calculate_UpperTriangularTransposeSolve_Bases(CUberBlockMatrix &S_bases, const CUberBlockMatrix &S, /*const*/ cs *p_St, size_t n_column, Eigen::MatrixXd &r_workspace, std::vector<size_t> &r_workspace1) // this is inline, to avoid link conflicts { _ASSERTE(S.b_EqualLayout(S_bases)); // will yield a matrix with the same sparsity structure //S.CopyLayoutTo(S_bases); _ASSERTE(n_column < S.n_BlockColumn_Num()); // make sure the column is inside the matrix _ASSERTE(p_St->m == S.n_BlockRow_Num() && p_St->n == S.n_BlockColumn_Num()); // should be the same matrix _ASSERTE(sizeof(csi) == sizeof(size_t)); r_workspace1.resize(2 * p_St->n); // alloc workspace { //cs *p_St = cs_transpose(p_S, 0); // need p_St /*csi p_col[2] = {0, 1}; csi n_row = 0; cs B; B.m = p_St->m; B.n = 1; B.p = p_col; B.i = &n_row; B.x = 0; B.nzmax = 1; B.nz = -1;*/ // prepare a single entry CSC matrix //Eigen::Matrix<double, Eigen::Dynamic, 6> S_dense_basis(S.n_Row_Num(), 6); // todo - implement matrix solving and try to make this row major // unit basis matrix //for(size_t n_column = 0, n = S.n_BlockColumn_Num(); n_column < n; ++ n_column) { // t_odo - do this in parallel (probably explicit matrix reduction rather than locking) { //size_t n_column = n_column; size_t w = S.n_BlockColumn_Column_Num(n_column); // t_odo - FBS it //_ASSERTE(w == 6); //n_row = n_column; //size_t n_first_dep_col = cs_reach(p_St, &B, 0, (csi*)&r_workspace1.front(), 0); // modifies p_St but then puts it back size_t n_first_dep_col = cs_dfs(n_column, p_St, p_St->n, (csi*)&r_workspace1.front(), (csi*)&r_workspace1.front() + p_St->n, 0); // no need for B // todo - reimplement this directly on block matrices, try to avoid needing the transpose size_t n_dep_col_num = p_St->n - n_first_dep_col; const size_t *p_dep_col = &r_workspace1[n_first_dep_col]; for(size_t j = 0; j < n_dep_col_num; ++ j) CS_MARK(p_St->p, p_dep_col[j]); // restore col. 
pointers after calling cs_dfs // get the list of columns of S that affect block U_Dinv_{n_column, *} r_workspace.resize(S.n_Row_Num(), w); // alloc workspace Eigen::MatrixXd &S_dense_basis = r_workspace; // just rename //Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>, Eigen::Aligned> // S_dense_basis(r_workspace.data(), S.n_Row_Num(), w); // map the workspace as (column-wise) fixed-size matrix S_dense_basis.setZero(); S_dense_basis.middleRows(S_bases.n_BlockColumn_Base(n_column), w).setIdentity(); // create a vector of zeros for(size_t c = 0; c < w; ++ c) { //S.UpperTriangularTranspose_Solve(&U_Dinv_i_permd.col(c)(0), U_Dinv_i_permd.rows(), p_dep_col, n_dep_col_num); S.UpperTriangularTranspose_Solve/*_FBS<SC_BlockSizes>*/(&S_dense_basis.col(c)(0), S_dense_basis.rows(), p_dep_col, n_dep_col_num); } // sparse sparse UTTSolve for(size_t j = 0; j < n_dep_col_num; ++ j) { size_t n_row = p_dep_col[j]; size_t y = S_bases.n_BlockColumn_Base(n_row); size_t h = S_bases.n_BlockColumn_Column_Num(n_row); // those are rows but S_bases is symmetric #ifdef _DEBUG if(j > 0) { const size_t r_prev = p_dep_col[j - 1]; size_t y_prev = S_bases.n_BlockColumn_Base(r_prev); size_t h_prev = S_bases.n_BlockColumn_Column_Num(r_prev); size_t e_prev = y_prev + h_prev; _ASSERTE(S_dense_basis.middleRows(e_prev, y - e_prev).squaredNorm() == 0); // make sure there are zeros between consecutive (nonadjacent) blocks } else if(y > 0) _ASSERTE(S_dense_basis.topRows(y).squaredNorm() == 0); // make sure there are zeros above the first block if(j + 1 == n_dep_col_num) _ASSERTE(S_dense_basis.bottomRows(S_dense_basis.rows() - (y + h)).squaredNorm() == 0); // make sure there are zeros till the end // make sure that there are only zeros in between the elements #endif // _DEBUG //_ASSERTE(h == 6); //_ASSERTE(S_bases.n_BlockColumn_Column_Num(n_row) == 6); // t_odo - FBS it S_bases.t_GetBlock_Log(n_row, n_column, h, w) = S_dense_basis.middleRows(S_bases.n_BlockColumn_Base(n_row), h); // this is transposed (transpose the block as well?): each row is a single basis; this only works if the structure of S is symmetric } // sparse fill the bases matrix } //S_bases.Rasterize("S_bases.tga", 3); // ... 
} } /** * @brief FBS implementation for the upper triangular transpose solve of the sparse bases matrix */ class CUTTSolve_Bases_Impl { public: struct TInnerContext { CUberBlockMatrix &r_dest; const size_t n_column; const Eigen::MatrixXd &r_src; const size_t n_row; inline TInnerContext(CUberBlockMatrix &_r_dest, size_t _n_column, const Eigen::MatrixXd &_r_src, size_t _n_row) :r_dest(_r_dest), n_column(_n_column), r_src(_r_src), n_row(_n_row) {} }; template <const int n_row_height, class CColumnWidth> class CInnerLoop { public: enum { n_column_width = CColumnWidth::n_size }; public: static inline void Do(TInnerContext t_ctx) { _ASSERTE(t_ctx.r_src.rows() == t_ctx.r_dest.n_Row_Num()); _ASSERTE(n_column_width == t_ctx.r_dest.n_BlockColumn_Column_Num(t_ctx.n_column)); Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, n_column_width>, Eigen::Aligned> S_dense_basis(t_ctx.r_src.data(), t_ctx.r_src.rows(), n_column_width); // map the source as (column-wise) fixed-size matrix Eigen::Map<Eigen::Matrix<double, n_row_height, n_column_width> > dest_block(t_ctx.r_dest.p_GetBlock_Log(t_ctx.n_row, t_ctx.n_column, n_row_height, n_column_width, true, false)); dest_block = S_dense_basis.template middleRows<n_row_height>(t_ctx.r_dest.n_BlockColumn_Base(t_ctx.n_row)); } }; struct TOuterContext { CUberBlockMatrix &r_S_bases; const size_t n_column; const CUberBlockMatrix &r_S; Eigen::MatrixXd &r_workspace; const size_t *p_dep_col; const size_t n_dep_num; inline TOuterContext(CUberBlockMatrix &_r_S_bases, size_t _n_column, const CUberBlockMatrix &_r_S, Eigen::MatrixXd &_r_workspace, const size_t *_p_dep_col, size_t _n_dep_num) :r_S_bases(_r_S_bases), n_column(_n_column), r_S(_r_S), r_workspace(_r_workspace), p_dep_col(_p_dep_col), n_dep_num(_n_dep_num) {} }; template <const int n_column_width, class CBlockSizeList> class COuterLoop { public: static inline void Do(TOuterContext t_ctx) { t_ctx.r_workspace.resize(t_ctx.r_S.n_Row_Num(), n_column_width); // alloc workspace Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, n_column_width>, Eigen::Aligned> S_dense_basis(t_ctx.r_workspace.data(), t_ctx.r_S.n_Row_Num(), n_column_width); // map the workspace as (column-wise) fixed-size matrix S_dense_basis.setZero(); S_dense_basis.template middleRows<n_column_width>(t_ctx.r_S.n_BlockColumn_Base(t_ctx.n_column)).setIdentity(); // create a vector of zeros for(size_t c = 0; c < n_column_width; ++ c) { // todo - make a version of UpperTriangularTranspose_Solve_FBS for vectors t_ctx.r_S.UpperTriangularTranspose_Solve_FBS<CBlockSizeList>(&S_dense_basis.col(c)(0), S_dense_basis.rows(), t_ctx.p_dep_col, t_ctx.n_dep_num); } // sparse sparse UTTSolve for(size_t j = 0; j < t_ctx.n_dep_num; ++ j) { size_t n_row = t_ctx.p_dep_col[j]; size_t h = t_ctx.r_S.n_BlockColumn_Column_Num(n_row); // those are rows but S_bases is symmetric #ifdef _DEBUG size_t y = t_ctx.r_S.n_BlockColumn_Base(n_row); if(j > 0) { const size_t r_prev = t_ctx.p_dep_col[j - 1]; size_t y_prev = t_ctx.r_S.n_BlockColumn_Base(r_prev); size_t h_prev = t_ctx.r_S.n_BlockColumn_Column_Num(r_prev); size_t e_prev = y_prev + h_prev; _ASSERTE(S_dense_basis.middleRows(e_prev, y - e_prev).squaredNorm() == 0); // make sure there are zeros between consecutive (nonadjacent) blocks } else if(y > 0) _ASSERTE(S_dense_basis.topRows(y).squaredNorm() == 0); // make sure there are zeros above the first block if(j + 1 == t_ctx.n_dep_num) _ASSERTE(S_dense_basis.bottomRows(S_dense_basis.rows() - (y + h)).squaredNorm() == 0); // make sure there are zeros till the end // make sure that there 
are only zeros in between the elements #endif // _DEBUG //_ASSERTE(h == 6); //_ASSERTE(S_bases.n_BlockColumn_Column_Num(n_row) == 6); // t_odo - FBS it //S_bases.t_GetBlock_Log(n_row, n_column, h, w) = // S_dense_basis.middleRows<6>(S_bases.n_BlockColumn_Base(n_row)); // this is transposed (transpose the block as well?): each row is a single basis; this only works if the structure of S is symmetric fbs_ut::CWrap2<CInnerLoop, fbs_ut::CCTSize<n_column_width> >::template In_RowHeight_DecisionTree_Given_ColumnWidth<CBlockSizeList, n_column_width>(int(h), TInnerContext(t_ctx.r_S_bases, t_ctx.n_column, t_ctx.r_workspace, n_row)); // use SSE to copy stuff around } // sparse fill the bases matrix } }; }; template <class CBlockSizeList> void Calculate_UpperTriangularTransposeSolve_Bases_FBS(CUberBlockMatrix &S_bases, const CUberBlockMatrix &S, /*const*/ cs *p_St, size_t n_column, Eigen::MatrixXd &r_workspace, std::vector<size_t> &r_workspace1) { _ASSERTE(S.b_EqualLayout(S_bases)); // will yield a matrix with the same sparsity structure //S.CopyLayoutTo(S_bases); _ASSERTE(n_column < S.n_BlockColumn_Num()); // make sure the column is inside the matrix _ASSERTE(p_St->m == S.n_BlockRow_Num() && p_St->n == S.n_BlockColumn_Num()); // should be the same matrix _ASSERTE(sizeof(csi) == sizeof(size_t)); r_workspace1.resize(2 * p_St->n); // alloc workspace size_t w = S.n_BlockColumn_Column_Num(n_column); // t_odo - FBS it //_ASSERTE(w == 6); //n_row = n_column; //size_t n_first_dep_col = cs_reach(p_St, &B, 0, (csi*)&r_workspace1.front(), 0); // modifies p_St but then puts it back size_t n_first_dep_col = cs_dfs(n_column, p_St, p_St->n, (csi*)&r_workspace1.front(), (csi*)&r_workspace1.front() + p_St->n, 0); // no need for B // todo - reimplement this directly on block matrices, try to avoid needing the transpose size_t n_dep_col_num = p_St->n - n_first_dep_col; const size_t *p_dep_col = &r_workspace1[n_first_dep_col]; for(size_t j = 0; j < n_dep_col_num; ++ j) CS_MARK(p_St->p, p_dep_col[j]); // restore col. 
pointers after calling cs_dfs // get the list of columns of S that affect block U_Dinv_{n_column, *} // note that this is FBS-independent fbs_ut::CWrap2<CUTTSolve_Bases_Impl::COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>(int(w), CUTTSolve_Bases_Impl::TOuterContext(S_bases, n_column, S, r_workspace, p_dep_col, n_dep_col_num)); } } // ~sc_margs_detail template <class _SC_BlockSizes, class _U_BlockSizes, class _V_BlockSizes, class _D_BlockSizes> class CSchurComplement_Marginals { public: typedef _SC_BlockSizes SC_BlockSizes; typedef _SC_BlockSizes A_BlockSizes; // the same typedef _U_BlockSizes U_BlockSizes; typedef _V_BlockSizes V_BlockSizes; typedef _D_BlockSizes D_BlockSizes; typedef typename CUniqueTypelist<typename CConcatTypelist<typename CConcatTypelist<A_BlockSizes, U_BlockSizes>::_TyResult, typename CConcatTypelist<V_BlockSizes, D_BlockSizes>::_TyResult>::_TyResult>::_TyResult Lambda_BlockSizes; protected: bool m_b_verbose; mutable double m_f_time_Dinv_copy; mutable double m_f_time_sp_struct; mutable double m_f_time_S_bases; mutable double m_f_time_lm_inverse; mutable double m_f_time_cam_inverse; mutable double m_f_time_lambda_AMD; mutable double m_f_time_lambda_perm; mutable double m_f_time_lambda_Chol; mutable double m_f_time_lambda_recformula; mutable double m_f_time_lambda_unperm; mutable uint64_t m_n_worst_memory; public: CSchurComplement_Marginals(bool b_verbose = false) :m_b_verbose(b_verbose), m_f_time_Dinv_copy(0), m_f_time_sp_struct(0), m_f_time_S_bases(0), m_f_time_lm_inverse(0), m_f_time_cam_inverse(0), m_f_time_lambda_AMD(0), m_f_time_lambda_perm(0), m_f_time_lambda_Chol(0), m_f_time_lambda_recformula(0), m_f_time_lambda_unperm(0), m_n_worst_memory(0) {} inline bool b_Verbose() const { return m_b_verbose; } void Add_LambdaChol_Time(double f_time_AMD, double f_time_perm, double f_time_Chol) { m_f_time_lambda_AMD += f_time_AMD; m_f_time_lambda_perm += f_time_perm; m_f_time_lambda_Chol += f_time_Chol; } void Dump() const { printf("\trecursive margs took %f sec, out of which:\n", m_f_time_lambda_AMD + m_f_time_lambda_perm + m_f_time_lambda_Chol + m_f_time_lambda_recformula + m_f_time_lambda_unperm); printf("\t\t amd: %f\n", m_f_time_lambda_AMD); printf("\t\t perm: %f\n", m_f_time_lambda_perm); printf("\t\t Chol: %f\n", m_f_time_lambda_Chol); printf("\t\trform: %f\n", m_f_time_lambda_recformula); printf("\t\tunprm: %f\n", m_f_time_lambda_unperm); printf("\tSchur margs took %f sec, out of which:\n", m_f_time_Dinv_copy + m_f_time_sp_struct + m_f_time_S_bases + m_f_time_lm_inverse); printf("\t\tdinit: %f\n", m_f_time_Dinv_copy); printf("\t\tstruc: %f\n", m_f_time_sp_struct); printf("\t\tbases: %f\n", m_f_time_S_bases); printf("\t\t inv: %f\n", m_f_time_lm_inverse); printf("\trecursive inverse of cameras using SC took %f sec\n", m_f_time_cam_inverse); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\tSchur marginals took " PRIsizeB "B memory at most\n", PRIsizeBparams(m_n_worst_memory)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } bool Get_CholeskyLambda(CUberBlockMatrix &R, CMatrixOrdering &lam_ord, const CUberBlockMatrix &lambda) const { CTimer t; CTimerSampler timer(t); lam_ord.p_BlockOrdering(lambda, true); // w.r.t. 
lambda_perm const size_t *p_lam_ord = lam_ord.p_Get_InverseOrdering(); const size_t n_lam_ord_size = lam_ord.n_Ordering_Size(); // ordering for lambda double f_lambda_amd_time = 0; timer.Accum_DiffSample(f_lambda_amd_time); m_f_time_lambda_AMD += f_lambda_amd_time; CUberBlockMatrix lambda_amd; lambda.Permute_UpperTriangular_To(lambda_amd, p_lam_ord, n_lam_ord_size, true); double f_lambda_perm_time = 0; timer.Accum_DiffSample(f_lambda_perm_time); m_f_time_lambda_perm += f_lambda_perm_time; /*typedef CConcatTypelist<CConcatTypelist<SC_BlockSizes, U_BlockSizes>::_TyResult, CConcatTypelist<V_BlockSizes, D_BlockSizes>::_TyResult>::_TyResult Lambda_BlockSizes;*/ if(!R.CholeskyOf_FBS<Lambda_BlockSizes>(lambda_amd)) { fprintf(stderr, "error: lambda is not positive definite\n"); return false; } double f_lambda_chol_time = 0; timer.Accum_DiffSample(f_lambda_chol_time); m_f_time_lambda_Chol += f_lambda_chol_time; if(m_b_verbose) { printf("\tCholesky of lambda took %f sec, out of which:\n", f_lambda_chol_time + f_lambda_perm_time + f_lambda_amd_time); printf("\t\t amd: %f\n", f_lambda_amd_time); printf("\t\t perm: %f\n", f_lambda_perm_time); printf("\t\t Chol: %f, " PRIsize " elem. nnz (needs %.2f MB)\n", f_lambda_chol_time, R.n_NonZero_Num(), R.n_Allocation_Size_NoLastPage() / 1048576.0); } return true; } /** * @brief calculates the block diagonal of the covariance matrix using the recursive formula * * @param[out] margs_recursive is filled with the marginals upon return * @param[in] R is Cholesky factorization of the system matrix * @param[in] lam_ord is reference to the AMD ordering that was used for R * * @note This function throws std::bad_alloc. */ void Recursive_Marginals(//CUberBlockMatrix &cam_cov_rec, CUberBlockMatrix &lm_cov_rec, CUberBlockMatrix &margs_recursive, const CUberBlockMatrix &R, const CMatrixOrdering &lam_ord) const // throw(std::bad_alloc) { CTimer t; CTimerSampler timer(t); double f_lambda_recformula_time = 0; { CUberBlockMatrix margs_ordered; CMarginals::Calculate_DenseMarginals_Recurrent_FBS<Lambda_BlockSizes>(margs_ordered, R, lam_ord, mpart_Diagonal, false); // calculate the thing timer.Accum_DiffSample(f_lambda_recformula_time); m_f_time_lambda_recformula += f_lambda_recformula_time; margs_ordered.Permute_UpperTriangular_To(margs_recursive, lam_ord.p_Get_Ordering(), lam_ord.n_Ordering_Size(), false); // no share! the original will be deleted } double f_lambda_unperm_time = 0; timer.Accum_DiffSample(f_lambda_unperm_time); m_f_time_lambda_unperm += f_lambda_unperm_time; if(m_b_verbose) { printf("\trecursive margs took %f sec, out of which:\n", f_lambda_unperm_time + f_lambda_recformula_time /*+ f_lambda_chol_time + f_lambda_perm_time + f_lambda_amd_time*/); /*printf("\t amd: %.3f\n", f_lambda_amd_time); printf("\t perm: %.3f\n", f_lambda_perm_time); printf("\t Chol: %.3f, " PRIsize " elem. 
nnz (needs %.2f MB)\n", f_lambda_chol_time, R.n_NonZero_Num(), R.n_Allocation_Size_NoLastPage() / 1048576.0);*/ printf("\t\trform: %f\n", f_lambda_recformula_time); printf("\t\tunprm: %f\n", f_lambda_unperm_time); } /*margs_recursive.SliceTo(cam_cov_rec, 0, n_matrix_cut, 0, n_matrix_cut, true); margs_recursive.SliceTo(lm_cov_rec, n_matrix_cut, n_size, n_matrix_cut, n_size, true); // get the submatrices*/ } /** * @brief calculates block diagonal of the covariance matrix from a Schur-complemented system * * @param[out] sp_cam_inv is filled with camera marginals upon return * @param[in] b_do_cam_marginals is camera marginals flag (if set, sp_cam_inv is filled, otherwise it is empty) * @param[out] sp_lm_inv is filled with landmark marginals upon return * @param[in] S is Cholesky factorization of the Schur complement * @param[in] SC_ord is reference to the fill-reducing ordering that was used to factorize the Schur * complement (this is *not* the ordering that separates landmarks and poses) * @param[in] Dinv is inverse of the (diagonal) landmark matrix * @param[in] U_Dinv is a product of the top off-diagonal block of the Schur complement and Dinv * @param[in] b_negative_Dinv_UDinv is negative flag (set if your Dinv and UDinv are negative) * * @note This function throws std::bad_alloc. */ void Schur_Marginals(CUberBlockMatrix &sp_cam_inv, bool b_do_cam_marginals, CUberBlockMatrix &sp_lm_inv, const CUberBlockMatrix &S, const CMatrixOrdering &SC_ord, const CUberBlockMatrix &Dinv, const CUberBlockMatrix &U_Dinv, bool b_negative_Dinv_UDinv = false) const // throw(std::bad_alloc) { #ifdef __BA_MARGS_DO_MEMORY_PROFILING static bool b_warned = false; if(!b_warned) { fprintf(stderr, "warning: __BA_MARGS_DO_MEMORY_PROFILING defined! 
" "do not use this run for timing measurements\n"); b_warned = true; } #endif // __BA_MARGS_DO_MEMORY_PROFILING const size_t *p_SC_ord = SC_ord.p_Get_InverseOrdering(); const size_t n_SC_ord_size = SC_ord.n_Ordering_Size(); _ASSERTE(n_SC_ord_size == S.n_BlockColumn_Num()); // if this fails then the ordering is likely for the entire lambda; this is just the // ordering to get chol(shur(lambda)) CTimer t; CTimerSampler timer(t); sp_lm_inv = Dinv; // start with that if(b_negative_Dinv_UDinv) sp_lm_inv.Scale_FBS_Parallel<D_BlockSizes>(-1); // this needs to be negated; the rest of the product is squared so it cancels out double f_diag_init_time = 0; timer.Accum_DiffSample(f_diag_init_time); m_f_time_Dinv_copy += f_diag_init_time; cs *p_S = S.p_BlockStructure_to_Sparse(); CUberBlockMatrix U_Dinv_perm; U_Dinv.PermuteTo(U_Dinv_perm, p_SC_ord, n_SC_ord_size, true, false, true); // get a permuted view of U_Dinv (can be shared among the threads) cs *p_B = U_Dinv_perm.p_BlockStructure_to_Sparse(); // grab its structure (can be shared among the threads) double f_struct_time = 0; timer.Accum_DiffSample(f_struct_time); m_f_time_sp_struct += f_struct_time; CUberBlockMatrix S_bases; std::vector<CUberBlockMatrix> S_bases_thr_list; double f_parallel_S_bases_time = 0; #ifdef __BA_MARGS_DO_MEMORY_PROFILING uint64_t n_memory = 0; #endif // __BA_MARGS_DO_MEMORY_PROFILING #pragma omp parallel { #ifdef _OPENMP const size_t n_thread_num = omp_get_num_threads(); const size_t n_thread_id = omp_get_thread_num(); #else // _OPENMP static const size_t n_thread_num = 1; static const size_t n_thread_id = 0; #endif // _OPENMP #pragma omp master { S_bases_thr_list.resize(n_thread_num); } #pragma omp barrier // alloc partials in thread 0 std::vector<size_t> workspace(n_SC_ord_size * 2); // alloc thread-private workspace for cs_reach() cs *p_St_thr = cs_transpose(p_S, 0); // need a private copy of p_St for each thread { CUberBlockMatrix &S_bases_thr = S_bases_thr_list[n_thread_id]; // rename S.CopyLayoutTo(S_bases_thr); Eigen::MatrixXd/*<double, Eigen::Dynamic, 6>*/ S_dense_basis;//(S.n_Row_Num(), 6); // todo - implement matrix solving and try to make this row major // unit basis matrix const size_t n = S.n_BlockColumn_Num(); _ASSERTE(n <= INT_MAX); const int _n = int(n); #pragma omp for schedule(dynamic, 1) // t_odo - dynamic schedule? 
each column will likely have a different cost (todo - build histograms) for(int i = 0; i < _n; ++ i) { S_dense_basis.resize(S.n_Row_Num(), S.n_BlockColumn_Column_Num(i)); // handle different block sizes sc_margs_detail::Calculate_UpperTriangularTransposeSolve_Bases_FBS<SC_BlockSizes>(S_bases_thr, S, p_St_thr, i, S_dense_basis, workspace); // use the nice function instead } } #pragma omp barrier // wait for all the threads to compute their bases #pragma omp master { S_bases.Swap(S_bases_thr_list.front()); // start with 0 for(size_t i = 1, n = n_thread_num; i < n; ++ i) S_bases_thr_list[i].AddTo(S_bases); // no need for FBS, no two blocks will overlap // simple serial reduction in thread 0, could do a parallel one std::vector<CUberBlockMatrix> empty; S_bases_thr_list.swap(empty); timer.Accum_DiffSample(f_parallel_S_bases_time); m_f_time_S_bases += f_parallel_S_bases_time; } // reduce the bases #pragma omp barrier // synchronize the threads before continuing Eigen::Matrix3d lm_i_cov; Eigen::Matrix<double, Eigen::Dynamic, 3> U_Dinv_i_permd(U_Dinv.n_Row_Num(), 3); // those can stay allocated, the size does not change throughout the algorithm const size_t n = Dinv.n_BlockColumn_Num(); _ASSERTE(n <= INT_MAX); const int _n = int(n); #pragma omp for schedule(dynamic, 1) // t_odo - dynamic schedule? each column will likely have a different cost (todo - build histograms) for(int i = 0; i < _n; ++ i) { CUberBlockMatrix U_Dinv_i_perm; U_Dinv_perm.SliceTo(U_Dinv_i_perm, 0, U_Dinv_perm.n_BlockRow_Num(), i, i + 1, true); // grab a single column of U_Dinv_perm (via reference) CUberBlockMatrix SinvT_U_Dinv_i_perm; SinvT_U_Dinv_i_perm.ProductOf_FBS<SC_BlockSizes, U_BlockSizes>(S_bases, U_Dinv_i_perm); // gets a sparse matrix, the size of U_Dinv_i_perm #ifdef __BA_MARGS_DO_MEMORY_PROFILING size_t n_mem_col = SinvT_U_Dinv_i_perm.n_Allocation_Size_NoLastPage(); #pragma omp critical { n_memory = std::max(uint64_t(n_mem_col), n_memory); } #endif // __BA_MARGS_DO_MEMORY_PROFILING CUberBlockMatrix::_TyMatrixXdRef t_lminv_ii = sp_lm_inv.t_GetBlock_Log(i, i); sc_margs_detail::BlockVector_PreMultiplyWithSelfTranspose_Add_FBS<U_BlockSizes>(t_lminv_ii, SinvT_U_Dinv_i_perm); } cs_spfree(p_St_thr); } // calculate block diagonal covariances of only the landmarks #ifdef __BA_MARGS_DO_MEMORY_PROFILING n_memory += CSparseMatrixMemInfo::n_Allocation_Size(p_S) + CSparseMatrixMemInfo::n_Allocation_Size(p_B); //n_memory += sp_lm_inv.n_Allocation_Size_NoLastPage(); // the marginals themselves // not! 
#endif // __BA_MARGS_DO_MEMORY_PROFILING cs_spfree(p_S); cs_spfree(p_B); double f_inverse_time = 0, f_lminv_time = 0; timer.Accum_DiffSample(f_inverse_time); m_f_time_lm_inverse += f_inverse_time; f_lminv_time = f_struct_time + f_diag_init_time + f_parallel_S_bases_time + f_inverse_time; if(m_b_verbose) { printf("\tSchur margs took %f sec, out of which:\n", f_lminv_time); printf("\t\tstruc: %f\n", f_struct_time); printf("\t\tbases: %f (%.2f %% sparsity, S has %.2f %%, needed %.2f MB)\n", f_parallel_S_bases_time, 100 * double(S_bases.n_Block_Num() * 6 * 6) / (S_bases.n_BlockColumn_Num() * S_bases.n_BlockColumn_Num() * 6 * 6), 100 * double(S.n_Block_Num() * 6 * 6) / (S.n_BlockColumn_Num() * S.n_BlockColumn_Num() * 6 * 6), S_bases.n_Allocation_Size_NoLastPage() / 1048576.0); printf("\t\tdinit: %f\n", f_diag_init_time); printf("\t\t inv: %f\n", f_inverse_time); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\tSchur margs took " PRIsizeB "B of memory\n", PRIsizeBparams(n_memory)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } double f_stats_time = 0; timer.Accum_DiffSample(f_stats_time); #ifdef __BA_MARGS_DO_MEMORY_PROFILING uint64_t n_memory_cams = 0; #endif // __BA_MARGS_DO_MEMORY_PROFILING if(b_do_cam_marginals) { CUberBlockMatrix &rcs_cov = sp_cam_inv; // just rename { CUberBlockMatrix margs_ordered; CMarginals::Calculate_DenseMarginals_Recurrent_FBS<SC_BlockSizes>(margs_ordered, S, SC_ord, mpart_Diagonal, false); margs_ordered.Permute_UpperTriangular_To(rcs_cov, SC_ord.p_Get_Ordering(), SC_ord.n_Ordering_Size(), false); // no share! the original will be deleted } double f_rcs_inverse_time = 0; timer.Accum_DiffSample(f_rcs_inverse_time); m_f_time_cam_inverse += f_rcs_inverse_time; #ifdef __BA_MARGS_DO_MEMORY_PROFILING n_memory_cams = rcs_cov.n_Allocation_Size_NoLastPage(); // !! compute even if not in verbose for(size_t i = 0, n = rcs_cov.n_BlockColumn_Num(); i < n; ++ i) { size_t n_dof = rcs_cov.n_BlockColumn_Column_Num(i); _ASSERTE(n_memory_cams >= n_dof * n_dof * sizeof(double)); // make sure the below line does not underflow n_memory_cams -= n_dof * n_dof * sizeof(double); } // do not count the size of marginals we're trying to return! just count the unwanted off-diagonals #endif // __BA_MARGS_DO_MEMORY_PROFILING if(m_b_verbose) { printf("\trecursive inverse of camera SC took %f sec (recovered " PRIsize " blocks)\n", f_rcs_inverse_time, rcs_cov.n_Block_Num()); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\trecursive inverse of camera SC takes " PRIsizeB "B of memory\n", PRIsizeBparams(n_memory_cams)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } } else sp_cam_inv.Clear(); #ifdef __BA_MARGS_DO_MEMORY_PROFILING m_n_worst_memory = std::max(m_n_worst_memory, n_memory + n_memory_cams); #endif // __BA_MARGS_DO_MEMORY_PROFILING } }; #endif // !__BA_MARGINALS_INCLUDED
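
The landmark part of Schur_Marginals() above rests on a block matrix inversion identity: for lambda = [A U; U^T D], with SC = A - U*D^-1*U^T the Schur complement of the landmark block and S its upper triangular Cholesky factor (S^T*S = SC), the landmark block of lambda^-1 equals D^-1 + (S^-T*U*D^-1)^T * (S^-T*U*D^-1); the code accumulates the diagonal blocks of the second term one block column at a time, via S_bases and BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(). The following minimal dense sketch verifies that identity with plain Eigen; it is not part of BAMarginals.h, and the sizes and names (nc, nl, J) are made up for illustration:

#include <Eigen/Dense>
#include <cstdio>

int main()
{
	const int nc = 4, nl = 6; // hypothetical camera / landmark dimensions
	Eigen::MatrixXd J = Eigen::MatrixXd::Random(nc + nl + 2, nc + nl);
	Eigen::MatrixXd lambda = J.transpose() * J +
		Eigen::MatrixXd::Identity(nc + nl, nc + nl); // a random SPD "system matrix"
	Eigen::MatrixXd A = lambda.topLeftCorner(nc, nc);
	Eigen::MatrixXd U = lambda.topRightCorner(nc, nl);
	Eigen::MatrixXd D_inv = lambda.bottomRightCorner(nl, nl).inverse();
	Eigen::MatrixXd SC = A - U * D_inv * U.transpose(); // Schur complement of the landmark block
	Eigen::MatrixXd S = SC.llt().matrixU(); // upper factor, S^T * S = SC
	Eigen::MatrixXd U_Dinv = U * D_inv;
	Eigen::MatrixXd SinvT_U_Dinv = S.transpose().triangularView<Eigen::Lower>().solve(U_Dinv); // S^-T * U * D^-1
	Eigen::MatrixXd lm_cov = D_inv + SinvT_U_Dinv.transpose() * SinvT_U_Dinv; // the identity
	Eigen::MatrixXd lambda_inv = lambda.inverse(); // dense ground truth
	printf("error of the Schur marginals identity: %g\n",
		(lm_cov - lambda_inv.bottomRightCorner(nl, nl)).norm()); // should be near machine precision
	return 0;
}

The sparse code obtains the same S^-T product without ever forming S^-1 densely: it restricts the triangular solve to the columns reachable from the current block column, which is what the cs_dfs() call in Calculate_UpperTriangularTransposeSolve_Bases() computes.
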
#pragma once #ifndef __BA_MARGINALS_INCLUDED #define __BA_MARGINALS_INCLUDED /** * @file include/slam/BAMarginals.h * @brief helper classes for covariance recovery in Schur-complemented systems * @author -tHE SWINe- * @date 2016-02-15 */ #include "slam/Marginals.h" #include "slam/MemUsage.h" // PRIsizeB /** * @def __BA_MARGS_DO_MEMORY_PROFILING * @brief if defined, memory use of the marginals will be profiled */ //#define __BA_MARGS_DO_MEMORY_PROFILING /** * @brief internal functions for Schur complement marginals */ namespace sc_margs_detail { // todo - use wrap3 below /** * @brief multiply add operation with a sparse matrix and a block vector */ class CBlockVectorMAD_Impl { public: struct TInnerContext { double *p_dest; const double *p_block; inline TInnerContext(double *_p_dest, const double *_p_block) :p_dest(_p_dest), p_block(_p_block) {} }; template <const int n_row_height, class CColumnWidth> class CInnerLoop { // not dependent on CBlockSizeList; don't make it an inner class of COuterLoop public: enum { n_column_width = CColumnWidth::n_size }; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_column_width, n_column_width>::_Ty _TyDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_row_height, n_column_width>::_TyConst _TyBlockMap; public: static inline void Do(TInnerContext t_ctx) { _TyBlockMap fbs_block(t_ctx.p_block); _TyDestMap(t_ctx.p_dest) += fbs_block.transpose().lazyProduct(fbs_block); // mad } }; struct TOuterContext { double *p_dest; const CUberBlockMatrix &r_block_vector; inline TOuterContext(double *_p_dest, const CUberBlockMatrix &_r_block_vector) :p_dest(_p_dest), r_block_vector(_r_block_vector) {} }; template <const int n_column_width, class CBlockSizeList> class COuterLoop { public: static inline void Do(TOuterContext t_ctx) { _ASSERTE(n_column_width == t_ctx.r_block_vector.n_BlockColumn_Column_Num(0)); for(size_t i = 0, n = t_ctx.r_block_vector.n_BlockColumn_Block_Num(0); i < n; ++ i) { CUberBlockMatrix::_TyMatrixXdRef t_block = const_cast<CUberBlockMatrix&>(t_ctx.r_block_vector).t_Block_AtColumn(0, i); // hack - need to cast, const_ref does not expose its pointer to data size_t n_row_height = t_block.rows(); _ASSERTE(n_column_width == t_block.cols()); fbs_ut::CWrap2<CInnerLoop, fbs_ut::CCTSize<n_column_width> >::template In_RowHeight_DecisionTree_Given_ColumnWidth<CBlockSizeList, n_column_width>(int(n_row_height), TInnerContext(t_ctx.p_dest, t_block.data()/*&t_block(0, 0)*/)); // mad } // for each nnz block } }; /*public: template <class CBlockSizeList> static void BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(double *p_dest, const CUberBlockMatrix &r_block_vector) // g++ is unable to reference TOuterLoop from the outside for some reason { fbs_ut::CWrap2<COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>( r_block_vector.n_Column_Num(), TOuterContext(p_dest, r_block_vector)); // decide over vector width }*/ }; template <class CBlockSizeList, class CDestMatrix> inline void BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(CDestMatrix &r_dest, const CUberBlockMatrix &r_block_vector) { _ASSERTE(r_block_vector.n_BlockColumn_Num() == 1); // it is a block column-vector _ASSERTE(r_block_vector.n_Column_Num() == r_dest.rows()); // the number of columns in the block vector matches the size of the destination _ASSERTE(r_dest.rows() == r_dest.cols()); // the output is square _ASSERTE(!(CDestMatrix::Flags & Eigen::RowMajor)); // must be column-major otherwise the conversion to pointer strips data 
//_ASSERTE(CDestMatrix::MaxColsAtCompileTime == Eigen::Dynamic || // CDestMatrix::MaxColsAtCompileTime == r_dest.cols() || // r_dest.cols() == 1); // the stride must be tight or there is a single col and it does not matter _ASSERTE(r_dest.cols() <= 1 || &r_dest(0, 1) == &r_dest(0, 0) + r_dest.rows()); // the stride must be tight or there is a single col and it does not matter // do not zero r_dest //CBlockVectorMAD_Impl::template BlockVector_PreMultiplyWithSelfTranspose_Add_FBS<CBlockSizeList>(r_dest.data(), r_block_vector); fbs_ut::CWrap2<CBlockVectorMAD_Impl::COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>(int(r_block_vector.n_Column_Num()), CBlockVectorMAD_Impl::TOuterContext(r_dest.data(), r_block_vector)); // decide over vector width } inline void Calculate_UpperTriangularTransposeSolve_Bases(CUberBlockMatrix &S_bases, const CUberBlockMatrix &S, /*const*/ cs *p_St, size_t n_column, Eigen::MatrixXd &r_workspace, std::vector<size_t> &r_workspace1) // this is inline, to avoid link conflicts { _ASSERTE(S.b_EqualLayout(S_bases)); // will yield a matrix with the same sparsity structure //S.CopyLayoutTo(S_bases); _ASSERTE(n_column < S.n_BlockColumn_Num()); // make sure the column is inside the matrix _ASSERTE(p_St->m == S.n_BlockRow_Num() && p_St->n == S.n_BlockColumn_Num()); // should be the same matrix _ASSERTE(sizeof(csi) == sizeof(size_t)); r_workspace1.resize(2 * p_St->n); // alloc workspace { //cs *p_St = cs_transpose(p_S, 0); // need p_St /*csi p_col[2] = {0, 1}; csi n_row = 0; cs B; B.m = p_St->m; B.n = 1; B.p = p_col; B.i = &n_row; B.x = 0; B.nzmax = 1; B.nz = -1;*/ // prepare a single entry CSC matrix //Eigen::Matrix<double, Eigen::Dynamic, 6> S_dense_basis(S.n_Row_Num(), 6); // todo - implement matrix solving and try to make this row major // unit basis matrix //for(size_t n_column = 0, n = S.n_BlockColumn_Num(); n_column < n; ++ n_column) { // t_odo - do this in parallel (probably explicit matrix reduction rather than locking) { //size_t n_column = n_column; size_t w = S.n_BlockColumn_Column_Num(n_column); // t_odo - FBS it //_ASSERTE(w == 6); //n_row = n_column; //size_t n_first_dep_col = cs_reach(p_St, &B, 0, (csi*)&r_workspace1.front(), 0); // modifies p_St but then puts it back size_t n_first_dep_col = cs_dfs(n_column, p_St, p_St->n, (csi*)&r_workspace1.front(), (csi*)&r_workspace1.front() + p_St->n, 0); // no need for B // todo - reimplement this directly on block matrices, try to avoid needing the transpose size_t n_dep_col_num = p_St->n - n_first_dep_col; const size_t *p_dep_col = &r_workspace1[n_first_dep_col]; for(size_t j = 0; j < n_dep_col_num; ++ j) CS_MARK(p_St->p, p_dep_col[j]); // restore col. 
pointers after calling cs_dfs // get the list of columns of S that affect block U_Dinv_{n_column, *} r_workspace.resize(S.n_Row_Num(), w); // alloc workspace Eigen::MatrixXd &S_dense_basis = r_workspace; // just rename //Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>, Eigen::Aligned> // S_dense_basis(r_workspace.data(), S.n_Row_Num(), w); // map the workspace as (column-wise) fixed-size matrix S_dense_basis.setZero(); S_dense_basis.middleRows(S_bases.n_BlockColumn_Base(n_column), w).setIdentity(); // create a vector of zeros for(size_t c = 0; c < w; ++ c) { //S.UpperTriangularTranspose_Solve(&U_Dinv_i_permd.col(c)(0), U_Dinv_i_permd.rows(), p_dep_col, n_dep_col_num); S.UpperTriangularTranspose_Solve/*_FBS<SC_BlockSizes>*/(&S_dense_basis.col(c)(0), S_dense_basis.rows(), p_dep_col, n_dep_col_num); } // sparse sparse UTTSolve for(size_t j = 0; j < n_dep_col_num; ++ j) { size_t n_row = p_dep_col[j]; size_t y = S_bases.n_BlockColumn_Base(n_row); size_t h = S_bases.n_BlockColumn_Column_Num(n_row); // those are rows but S_bases is symmetric #ifdef _DEBUG if(j > 0) { const size_t r_prev = p_dep_col[j - 1]; size_t y_prev = S_bases.n_BlockColumn_Base(r_prev); size_t h_prev = S_bases.n_BlockColumn_Column_Num(r_prev); size_t e_prev = y_prev + h_prev; _ASSERTE(S_dense_basis.middleRows(e_prev, y - e_prev).squaredNorm() == 0); // make sure there are zeros between consecutive (nonadjacent) blocks } else if(y > 0) _ASSERTE(S_dense_basis.topRows(y).squaredNorm() == 0); // make sure there are zeros above the first block if(j + 1 == n_dep_col_num) _ASSERTE(S_dense_basis.bottomRows(S_dense_basis.rows() - (y + h)).squaredNorm() == 0); // make sure there are zeros till the end // make sure that there are only zeros in between the elements #endif // _DEBUG //_ASSERTE(h == 6); //_ASSERTE(S_bases.n_BlockColumn_Column_Num(n_row) == 6); // t_odo - FBS it S_bases.t_GetBlock_Log(n_row, n_column, h, w) = S_dense_basis.middleRows(S_bases.n_BlockColumn_Base(n_row), h); // this is transposed (transpose the block as well?): each row is a single basis; this only works if the structure of S is symmetric } // sparse fill the bases matrix } //S_bases.Rasterize("S_bases.tga", 3); // ... 
} } /** * @brief FBS implementation for the upper triangular transpose solve of the sparse bases matrix */ class CUTTSolve_Bases_Impl { public: struct TInnerContext { CUberBlockMatrix &r_dest; const size_t n_column; const Eigen::MatrixXd &r_src; const size_t n_row; inline TInnerContext(CUberBlockMatrix &_r_dest, size_t _n_column, const Eigen::MatrixXd &_r_src, size_t _n_row) :r_dest(_r_dest), n_column(_n_column), r_src(_r_src), n_row(_n_row) {} }; template <const int n_row_height, class CColumnWidth> class CInnerLoop { public: enum { n_column_width = CColumnWidth::n_size }; public: static inline void Do(TInnerContext t_ctx) { _ASSERTE(t_ctx.r_src.rows() == t_ctx.r_dest.n_Row_Num()); _ASSERTE(n_column_width == t_ctx.r_dest.n_BlockColumn_Column_Num(t_ctx.n_column)); Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, n_column_width>, Eigen::Aligned> S_dense_basis(t_ctx.r_src.data(), t_ctx.r_src.rows(), n_column_width); // map the source as (column-wise) fixed-size matrix Eigen::Map<Eigen::Matrix<double, n_row_height, n_column_width> > dest_block(t_ctx.r_dest.p_GetBlock_Log(t_ctx.n_row, t_ctx.n_column, n_row_height, n_column_width, true, false)); dest_block = S_dense_basis.template middleRows<n_row_height>(t_ctx.r_dest.n_BlockColumn_Base(t_ctx.n_row)); } }; struct TOuterContext { CUberBlockMatrix &r_S_bases; const size_t n_column; const CUberBlockMatrix &r_S; Eigen::MatrixXd &r_workspace; const size_t *p_dep_col; const size_t n_dep_num; inline TOuterContext(CUberBlockMatrix &_r_S_bases, size_t _n_column, const CUberBlockMatrix &_r_S, Eigen::MatrixXd &_r_workspace, const size_t *_p_dep_col, size_t _n_dep_num) :r_S_bases(_r_S_bases), n_column(_n_column), r_S(_r_S), r_workspace(_r_workspace), p_dep_col(_p_dep_col), n_dep_num(_n_dep_num) {} }; template <const int n_column_width, class CBlockSizeList> class COuterLoop { public: static inline void Do(TOuterContext t_ctx) { t_ctx.r_workspace.resize(t_ctx.r_S.n_Row_Num(), n_column_width); // alloc workspace Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, n_column_width>, Eigen::Aligned> S_dense_basis(t_ctx.r_workspace.data(), t_ctx.r_S.n_Row_Num(), n_column_width); // map the workspace as (column-wise) fixed-size matrix S_dense_basis.setZero(); S_dense_basis.template middleRows<n_column_width>(t_ctx.r_S.n_BlockColumn_Base(t_ctx.n_column)).setIdentity(); // create a vector of zeros for(size_t c = 0; c < n_column_width; ++ c) { // todo - make a version of UpperTriangularTranspose_Solve_FBS for vectors t_ctx.r_S.UpperTriangularTranspose_Solve_FBS<CBlockSizeList>(&S_dense_basis.col(c)(0), S_dense_basis.rows(), t_ctx.p_dep_col, t_ctx.n_dep_num); } // sparse sparse UTTSolve for(size_t j = 0; j < t_ctx.n_dep_num; ++ j) { size_t n_row = t_ctx.p_dep_col[j]; size_t h = t_ctx.r_S.n_BlockColumn_Column_Num(n_row); // those are rows but S_bases is symmetric #ifdef _DEBUG size_t y = t_ctx.r_S.n_BlockColumn_Base(n_row); if(j > 0) { const size_t r_prev = t_ctx.p_dep_col[j - 1]; size_t y_prev = t_ctx.r_S.n_BlockColumn_Base(r_prev); size_t h_prev = t_ctx.r_S.n_BlockColumn_Column_Num(r_prev); size_t e_prev = y_prev + h_prev; _ASSERTE(S_dense_basis.middleRows(e_prev, y - e_prev).squaredNorm() == 0); // make sure there are zeros between consecutive (nonadjacent) blocks } else if(y > 0) _ASSERTE(S_dense_basis.topRows(y).squaredNorm() == 0); // make sure there are zeros above the first block if(j + 1 == t_ctx.n_dep_num) _ASSERTE(S_dense_basis.bottomRows(S_dense_basis.rows() - (y + h)).squaredNorm() == 0); // make sure there are zeros till the end // make sure that there 
are only zeros in between the elements #endif // _DEBUG //_ASSERTE(h == 6); //_ASSERTE(S_bases.n_BlockColumn_Column_Num(n_row) == 6); // t_odo - FBS it //S_bases.t_GetBlock_Log(n_row, n_column, h, w) = // S_dense_basis.middleRows<6>(S_bases.n_BlockColumn_Base(n_row)); // this is transposed (transpose the block as well?): each row is a single basis; this only works if the structure of S is symmetric fbs_ut::CWrap2<CInnerLoop, fbs_ut::CCTSize<n_column_width> >::template In_RowHeight_DecisionTree_Given_ColumnWidth<CBlockSizeList, n_column_width>(int(h), TInnerContext(t_ctx.r_S_bases, t_ctx.n_column, t_ctx.r_workspace, n_row)); // use SSE to copy stuff around } // sparse fill the bases matrix } }; }; template <class CBlockSizeList> void Calculate_UpperTriangularTransposeSolve_Bases_FBS(CUberBlockMatrix &S_bases, const CUberBlockMatrix &S, /*const*/ cs *p_St, size_t n_column, Eigen::MatrixXd &r_workspace, std::vector<size_t> &r_workspace1) { _ASSERTE(S.b_EqualLayout(S_bases)); // will yield a matrix with the same sparsity structure //S.CopyLayoutTo(S_bases); _ASSERTE(n_column < S.n_BlockColumn_Num()); // make sure the column is inside the matrix _ASSERTE(p_St->m == S.n_BlockRow_Num() && p_St->n == S.n_BlockColumn_Num()); // should be the same matrix _ASSERTE(sizeof(csi) == sizeof(size_t)); r_workspace1.resize(2 * p_St->n); // alloc workspace size_t w = S.n_BlockColumn_Column_Num(n_column); // t_odo - FBS it //_ASSERTE(w == 6); //n_row = n_column; //size_t n_first_dep_col = cs_reach(p_St, &B, 0, (csi*)&r_workspace1.front(), 0); // modifies p_St but then puts it back size_t n_first_dep_col = cs_dfs(n_column, p_St, p_St->n, (csi*)&r_workspace1.front(), (csi*)&r_workspace1.front() + p_St->n, 0); // no need for B // todo - reimplement this directly on block matrices, try to avoid needing the transpose size_t n_dep_col_num = p_St->n - n_first_dep_col; const size_t *p_dep_col = &r_workspace1[n_first_dep_col]; for(size_t j = 0; j < n_dep_col_num; ++ j) CS_MARK(p_St->p, p_dep_col[j]); // restore col. 
pointers after calling cs_dfs // get the list of columns of S that affect block U_Dinv_{n_column, *} // note that this is FBS-independent fbs_ut::CWrap2<CUTTSolve_Bases_Impl::COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>(int(w), CUTTSolve_Bases_Impl::TOuterContext(S_bases, n_column, S, r_workspace, p_dep_col, n_dep_col_num)); } } // ~sc_margs_detail template <class _SC_BlockSizes, class _U_BlockSizes, class _V_BlockSizes, class _D_BlockSizes> class CSchurComplement_Marginals { public: typedef _SC_BlockSizes SC_BlockSizes; typedef _SC_BlockSizes A_BlockSizes; // the same typedef _U_BlockSizes U_BlockSizes; typedef _V_BlockSizes V_BlockSizes; typedef _D_BlockSizes D_BlockSizes; typedef typename CUniqueTypelist<typename CConcatTypelist<typename CConcatTypelist<A_BlockSizes, U_BlockSizes>::_TyResult, typename CConcatTypelist<V_BlockSizes, D_BlockSizes>::_TyResult>::_TyResult>::_TyResult Lambda_BlockSizes; protected: bool m_b_verbose; mutable double m_f_time_Dinv_copy; mutable double m_f_time_sp_struct; mutable double m_f_time_S_bases; mutable double m_f_time_lm_inverse; mutable double m_f_time_cam_inverse; mutable double m_f_time_lambda_AMD; mutable double m_f_time_lambda_perm; mutable double m_f_time_lambda_Chol; mutable double m_f_time_lambda_recformula; mutable double m_f_time_lambda_unperm; mutable uint64_t m_n_worst_memory; public: CSchurComplement_Marginals(bool b_verbose = false) :m_b_verbose(b_verbose), m_f_time_Dinv_copy(0), m_f_time_sp_struct(0), m_f_time_S_bases(0), m_f_time_lm_inverse(0), m_f_time_cam_inverse(0), m_f_time_lambda_AMD(0), m_f_time_lambda_perm(0), m_f_time_lambda_Chol(0), m_f_time_lambda_recformula(0), m_f_time_lambda_unperm(0), m_n_worst_memory(0) {} inline bool b_Verbose() const { return m_b_verbose; } void Add_LambdaChol_Time(double f_time_AMD, double f_time_perm, double f_time_Chol) { m_f_time_lambda_AMD += f_time_AMD; m_f_time_lambda_perm += f_time_perm; m_f_time_lambda_Chol += f_time_Chol; } void Dump() const { printf("\trecursive margs took %f sec, out of which:\n", m_f_time_lambda_AMD + m_f_time_lambda_perm + m_f_time_lambda_Chol + m_f_time_lambda_recformula + m_f_time_lambda_unperm); printf("\t\t amd: %f\n", m_f_time_lambda_AMD); printf("\t\t perm: %f\n", m_f_time_lambda_perm); printf("\t\t Chol: %f\n", m_f_time_lambda_Chol); printf("\t\trform: %f\n", m_f_time_lambda_recformula); printf("\t\tunprm: %f\n", m_f_time_lambda_unperm); printf("\tSchur margs took %f sec, out of which:\n", m_f_time_Dinv_copy + m_f_time_sp_struct + m_f_time_S_bases + m_f_time_lm_inverse); printf("\t\tdinit: %f\n", m_f_time_Dinv_copy); printf("\t\tstruc: %f\n", m_f_time_sp_struct); printf("\t\tbases: %f\n", m_f_time_S_bases); printf("\t\t inv: %f\n", m_f_time_lm_inverse); printf("\trecursive inverse of cameras using SC took %f sec\n", m_f_time_cam_inverse); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\tSchur marginals took " PRIsizeB "B memory at most\n", PRIsizeBparams(m_n_worst_memory)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } bool Get_CholeskyLambda(CUberBlockMatrix &R, CMatrixOrdering &lam_ord, const CUberBlockMatrix &lambda) const { CTimer t; CTimerSampler timer(t); lam_ord.p_BlockOrdering(lambda, true); // w.r.t. 
lambda_perm const size_t *p_lam_ord = lam_ord.p_Get_InverseOrdering(); const size_t n_lam_ord_size = lam_ord.n_Ordering_Size(); // ordering for lambda double f_lambda_amd_time = 0; timer.Accum_DiffSample(f_lambda_amd_time); m_f_time_lambda_AMD += f_lambda_amd_time; CUberBlockMatrix lambda_amd; lambda.Permute_UpperTriangular_To(lambda_amd, p_lam_ord, n_lam_ord_size, true); double f_lambda_perm_time = 0; timer.Accum_DiffSample(f_lambda_perm_time); m_f_time_lambda_perm += f_lambda_perm_time; /*typedef CConcatTypelist<CConcatTypelist<SC_BlockSizes, U_BlockSizes>::_TyResult, CConcatTypelist<V_BlockSizes, D_BlockSizes>::_TyResult>::_TyResult Lambda_BlockSizes;*/ if(!R.CholeskyOf_FBS<Lambda_BlockSizes>(lambda_amd)) { fprintf(stderr, "error: lambda is not positive definite\n"); return false; } double f_lambda_chol_time = 0; timer.Accum_DiffSample(f_lambda_chol_time); m_f_time_lambda_Chol += f_lambda_chol_time; if(m_b_verbose) { printf("\tCholesky of lambda took %f sec, out of which:\n", f_lambda_chol_time + f_lambda_perm_time + f_lambda_amd_time); printf("\t\t amd: %f\n", f_lambda_amd_time); printf("\t\t perm: %f\n", f_lambda_perm_time); printf("\t\t Chol: %f, " PRIsize " elem. nnz (needs %.2f MB)\n", f_lambda_chol_time, R.n_NonZero_Num(), R.n_Allocation_Size_NoLastPage() / 1048576.0); } return true; } /** * @brief calculates the block diagonal of the covariance matrix using the recursive formula * * @param[out] margs_recursive is filled with the marginals upon return * @param[in] R is Cholesky factorization of the system matrix * @param[in] lam_ord is reference to the AMD ordering that was used for R * * @note This function throws std::bad_alloc. */ void Recursive_Marginals(//CUberBlockMatrix &cam_cov_rec, CUberBlockMatrix &lm_cov_rec, CUberBlockMatrix &margs_recursive, const CUberBlockMatrix &R, const CMatrixOrdering &lam_ord) const // throw(std::bad_alloc) { CTimer t; CTimerSampler timer(t); double f_lambda_recformula_time = 0; { CUberBlockMatrix margs_ordered; CMarginals::Calculate_DenseMarginals_Recurrent_FBS<Lambda_BlockSizes>(margs_ordered, R, lam_ord, mpart_Diagonal, false); // calculate the thing timer.Accum_DiffSample(f_lambda_recformula_time); m_f_time_lambda_recformula += f_lambda_recformula_time; margs_ordered.Permute_UpperTriangular_To(margs_recursive, lam_ord.p_Get_Ordering(), lam_ord.n_Ordering_Size(), false); // no share! the original will be deleted } double f_lambda_unperm_time = 0; timer.Accum_DiffSample(f_lambda_unperm_time); m_f_time_lambda_unperm += f_lambda_unperm_time; if(m_b_verbose) { printf("\trecursive margs took %f sec, out of which:\n", f_lambda_unperm_time + f_lambda_recformula_time /*+ f_lambda_chol_time + f_lambda_perm_time + f_lambda_amd_time*/); /*printf("\t amd: %.3f\n", f_lambda_amd_time); printf("\t perm: %.3f\n", f_lambda_perm_time); printf("\t Chol: %.3f, " PRIsize " elem. 
nnz (needs %.2f MB)\n", f_lambda_chol_time, R.n_NonZero_Num(), R.n_Allocation_Size_NoLastPage() / 1048576.0);*/ printf("\t\trform: %f\n", f_lambda_recformula_time); printf("\t\tunprm: %f\n", f_lambda_unperm_time); } /*margs_recursive.SliceTo(cam_cov_rec, 0, n_matrix_cut, 0, n_matrix_cut, true); margs_recursive.SliceTo(lm_cov_rec, n_matrix_cut, n_size, n_matrix_cut, n_size, true); // get the submatrices*/ } /** * @brief calculates block diagonal of the covariance matrix from a Schur-complemented system * * @param[out] sp_cam_inv is filled with camera marginals upon return * @param[in] b_do_cam_marginals is camera marginals flag (if set, sp_cam_inv is filled, otherwise it is empty) * @param[out] sp_lm_inv is filled with landmark marginals upon return * @param[in] S is Cholesky factorization of the Schur complement * @param[in] SC_ord is reference to the fill-reducing ordering that was used to factorize the Schur * complement (this is *not* the ordering that separates landmarks and poses) * @param[in] Dinv is inverse of the (diagonal) landmark matrix * @param[in] U_Dinv is a product of the top off-diagonal block of the Schur complement and Dinv * @param[in] b_negative_Dinv_UDinv is negative flag (set if your Dinv and UDinv are negative) * * @note This function throws std::bad_alloc. */ void Schur_Marginals(CUberBlockMatrix &sp_cam_inv, bool b_do_cam_marginals, CUberBlockMatrix &sp_lm_inv, const CUberBlockMatrix &S, const CMatrixOrdering &SC_ord, const CUberBlockMatrix &Dinv, const CUberBlockMatrix &U_Dinv, bool b_negative_Dinv_UDinv = false) const // throw(std::bad_alloc) { #ifdef __BA_MARGS_DO_MEMORY_PROFILING static bool b_warned = false; if(!b_warned) { fprintf(stderr, "warning: __BA_MARGS_DO_MEMORY_PROFILING defined! 
" "do not use this run for timing measurements\n"); b_warned = true; } #endif // __BA_MARGS_DO_MEMORY_PROFILING const size_t *p_SC_ord = SC_ord.p_Get_InverseOrdering(); const size_t n_SC_ord_size = SC_ord.n_Ordering_Size(); _ASSERTE(n_SC_ord_size == S.n_BlockColumn_Num()); // if this fails then the ordering is likely for the entire lambda; this is just the // ordering to get chol(shur(lambda)) CTimer t; CTimerSampler timer(t); sp_lm_inv = Dinv; // start with that if(b_negative_Dinv_UDinv) sp_lm_inv.Scale_FBS_Parallel<D_BlockSizes>(-1); // this needs to be negated; the rest of the product is squared so it cancels out double f_diag_init_time = 0; timer.Accum_DiffSample(f_diag_init_time); m_f_time_Dinv_copy += f_diag_init_time; cs *p_S = S.p_BlockStructure_to_Sparse(); CUberBlockMatrix U_Dinv_perm; U_Dinv.PermuteTo(U_Dinv_perm, p_SC_ord, n_SC_ord_size, true, false, true); // get a permuted view of U_Dinv (can be shared among the threads) cs *p_B = U_Dinv_perm.p_BlockStructure_to_Sparse(); // grab its structure (can be shared among the threads) double f_struct_time = 0; timer.Accum_DiffSample(f_struct_time); m_f_time_sp_struct += f_struct_time; CUberBlockMatrix S_bases; std::vector<CUberBlockMatrix> S_bases_thr_list; double f_parallel_S_bases_time = 0; #ifdef __BA_MARGS_DO_MEMORY_PROFILING uint64_t n_memory = 0; #endif // __BA_MARGS_DO_MEMORY_PROFILING #pragma omp master { S_bases_thr_list.resize(n_thread_num); } // alloc partials in thread 0 std::vector<size_t> workspace(n_SC_ord_size * 2); // alloc thread-private workspace for cs_reach() cs *p_St_thr = cs_transpose(p_S, 0); // need a private copy of p_St for each thread { CUberBlockMatrix &S_bases_thr = S_bases_thr_list[n_thread_id]; // rename S.CopyLayoutTo(S_bases_thr); Eigen::MatrixXd/*<double, Eigen::Dynamic, 6>*/ S_dense_basis;//(S.n_Row_Num(), 6); // todo - implement matrix solving and try to make this row major // unit basis matrix const size_t n = S.n_BlockColumn_Num(); _ASSERTE(n <= INT_MAX); const int _n = int(n); for(int i = 0; i < _n; ++ i) { S_dense_basis.resize(S.n_Row_Num(), S.n_BlockColumn_Column_Num(i)); // handle different block sizes sc_margs_detail::Calculate_UpperTriangularTransposeSolve_Bases_FBS<SC_BlockSizes>(S_bases_thr, S, p_St_thr, i, S_dense_basis, workspace); // use the nice function instead } } // wait for all the threads to compute their bases S_bases.Swap(S_bases_thr_list.front()); // start with 0 for(size_t i = 1, n = n_thread_num; i < n; ++ i) S_bases_thr_list[i].AddTo(S_bases); // no need for FBS, no two blocks will overlap // simple serial reduction in thread 0, could do a parallel one std::vector<CUberBlockMatrix> empty; S_bases_thr_list.swap(empty); timer.Accum_DiffSample(f_parallel_S_bases_time); m_f_time_S_bases += f_parallel_S_bases_time; // reduce the bases // synchronize the threads before continuing Eigen::Matrix3d lm_i_cov; Eigen::Matrix<double, Eigen::Dynamic, 3> U_Dinv_i_permd(U_Dinv.n_Row_Num(), 3); // those can stay allocated, the size does not change throughout the algorithm const size_t n = Dinv.n_BlockColumn_Num(); _ASSERTE(n <= INT_MAX); const int _n = int(n); for(int i = 0; i < _n; ++ i) { CUberBlockMatrix U_Dinv_i_perm; U_Dinv_perm.SliceTo(U_Dinv_i_perm, 0, U_Dinv_perm.n_BlockRow_Num(), i, i + 1, true); // grab a single column of U_Dinv_perm (via reference) CUberBlockMatrix SinvT_U_Dinv_i_perm; SinvT_U_Dinv_i_perm.ProductOf_FBS<SC_BlockSizes, U_BlockSizes>(S_bases, U_Dinv_i_perm); // gets a sparse matrix, the size of U_Dinv_i_perm #ifdef __BA_MARGS_DO_MEMORY_PROFILING size_t n_mem_col = 
SinvT_U_Dinv_i_perm.n_Allocation_Size_NoLastPage(); n_memory = std::max(uint64_t(n_mem_col), n_memory); #endif // __BA_MARGS_DO_MEMORY_PROFILING CUberBlockMatrix::_TyMatrixXdRef t_lminv_ii = sp_lm_inv.t_GetBlock_Log(i, i); sc_margs_detail::BlockVector_PreMultiplyWithSelfTranspose_Add_FBS<U_BlockSizes>(t_lminv_ii, SinvT_U_Dinv_i_perm); } cs_spfree(p_St_thr); // calculate block diagonal covariances of only the landmarks #ifdef __BA_MARGS_DO_MEMORY_PROFILING n_memory += CSparseMatrixMemInfo::n_Allocation_Size(p_S) + CSparseMatrixMemInfo::n_Allocation_Size(p_B); //n_memory += sp_lm_inv.n_Allocation_Size_NoLastPage(); // the marginals themselves // not! #endif // __BA_MARGS_DO_MEMORY_PROFILING cs_spfree(p_S); cs_spfree(p_B); double f_inverse_time = 0, f_lminv_time = 0; timer.Accum_DiffSample(f_inverse_time); m_f_time_lm_inverse += f_inverse_time; f_lminv_time = f_struct_time + f_diag_init_time + f_parallel_S_bases_time + f_inverse_time; if(m_b_verbose) { printf("\tSchur margs took %f sec, out of which:\n", f_lminv_time); printf("\t\tstruc: %f\n", f_struct_time); printf("\t\tbases: %f (%.2f %% sparsity, S has %.2f %%, needed %.2f MB)\n", f_parallel_S_bases_time, 100 * double(S_bases.n_Block_Num() * 6 * 6) / (S_bases.n_BlockColumn_Num() * S_bases.n_BlockColumn_Num() * 6 * 6), 100 * double(S.n_Block_Num() * 6 * 6) / (S.n_BlockColumn_Num() * S.n_BlockColumn_Num() * 6 * 6), S_bases.n_Allocation_Size_NoLastPage() / 1048576.0); printf("\t\tdinit: %f\n", f_diag_init_time); printf("\t\t inv: %f\n", f_inverse_time); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\tSchur margs took " PRIsizeB "B of memory\n", PRIsizeBparams(n_memory)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } double f_stats_time = 0; timer.Accum_DiffSample(f_stats_time); #ifdef __BA_MARGS_DO_MEMORY_PROFILING uint64_t n_memory_cams = 0; #endif // __BA_MARGS_DO_MEMORY_PROFILING if(b_do_cam_marginals) { CUberBlockMatrix &rcs_cov = sp_cam_inv; // just rename { CUberBlockMatrix margs_ordered; CMarginals::Calculate_DenseMarginals_Recurrent_FBS<SC_BlockSizes>(margs_ordered, S, SC_ord, mpart_Diagonal, false); margs_ordered.Permute_UpperTriangular_To(rcs_cov, SC_ord.p_Get_Ordering(), SC_ord.n_Ordering_Size(), false); // no share! the original will be deleted } double f_rcs_inverse_time = 0; timer.Accum_DiffSample(f_rcs_inverse_time); m_f_time_cam_inverse += f_rcs_inverse_time; #ifdef __BA_MARGS_DO_MEMORY_PROFILING n_memory_cams = rcs_cov.n_Allocation_Size_NoLastPage(); // !! compute even if not in verbose for(size_t i = 0, n = rcs_cov.n_BlockColumn_Num(); i < n; ++ i) { size_t n_dof = rcs_cov.n_BlockColumn_Column_Num(i); _ASSERTE(n_memory_cams >= n_dof * n_dof * sizeof(double)); // make sure the below line does not underflow n_memory_cams -= n_dof * n_dof * sizeof(double); } // do not count the size of marginals we're trying to return! just count the unwanted off-diagonals #endif // __BA_MARGS_DO_MEMORY_PROFILING if(m_b_verbose) { printf("\trecursive inverse of camera SC took %f sec (recovered " PRIsize " blocks)\n", f_rcs_inverse_time, rcs_cov.n_Block_Num()); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\trecursive inverse of camera SC takes " PRIsizeB "B of memory\n", PRIsizeBparams(n_memory_cams)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } } else sp_cam_inv.Clear(); #ifdef __BA_MARGS_DO_MEMORY_PROFILING m_n_worst_memory = std::max(m_n_worst_memory, n_memory + n_memory_cams); #endif // __BA_MARGS_DO_MEMORY_PROFILING } }; #endif // !__BA_MARGINALS_INCLUDED
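
The no_omp_formatted variant above keeps the shape of the parallel bases computation but runs it in a single thread, which is why the per-thread partials list and the reduction loop survive. The OpenMP pattern it was derived from — master-thread allocation of per-thread partials, an explicit barrier, a dynamically scheduled loop (column costs differ), and a serial reduction in the master thread — is sketched below on a toy workload; the sketch is hypothetical and self-contained, not SLAM++ code, and all names in it are made up:

#include <omp.h>
#include <vector>
#include <cstdio>

int main()
{
	const int n_col_num = 1000;
	std::vector<std::vector<double> > partial_list; // one partial result per thread
	std::vector<double> result;
#pragma omp parallel
	{
		const int n_thread_num = omp_get_num_threads();
		const int n_thread_id = omp_get_thread_num();
#pragma omp master
		{
			partial_list.resize(n_thread_num, std::vector<double>(n_col_num, .0));
		}
#pragma omp barrier
		// the partials are allocated now; note that omp master, unlike omp single,
		// does not imply a barrier, hence the explicit one above
		std::vector<double> &r_partial = partial_list[n_thread_id];
#pragma omp for schedule(dynamic, 1)
		for(int i = 0; i < n_col_num; ++ i)
			r_partial[i] = double(i) * i; // stand-in for computing one column of S_bases
		// implicit barrier at the end of the for; each i was written by exactly one thread
#pragma omp master
		{
			result.swap(partial_list.front()); // start with thread 0
			for(size_t t = 1; t < partial_list.size(); ++ t) {
				for(int i = 0; i < n_col_num; ++ i)
					result[i] += partial_list[t][i]; // serial reduction, mirrors AddTo()
			}
		}
	} // implicit barrier; result is visible after the region
	printf("result[10] = %g\n", result[10]); // 100
	return 0;
}

Schur_Marginals() in the original_c column relies on the same fencing: the barrier after the master block prevents any thread from indexing S_bases_thr_list before it is resized, and the disjoint column ranges are what make the final AddTo() reduction overlap-free.
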
#pragma once #ifndef __BA_MARGINALS_INCLUDED #define __BA_MARGINALS_INCLUDED /** * @file include/slam/BAMarginals.h * @brief helper classes for covariance recovery in Schur-complemented systems * @author -tHE SWINe- * @date 2016-02-15 */ #include "slam/Marginals.h" #include "slam/MemUsage.h" // PRIsizeB /** * @def __BA_MARGS_DO_MEMORY_PROFILING * @brief if defined, memory use of the marginals will be profiled */ //#define __BA_MARGS_DO_MEMORY_PROFILING /** * @brief internal functions for Schur complement marginals */ namespace sc_margs_detail { // todo - use wrap3 below /** * @brief multiply add operation with a sparse matrix and a block vector */ class CBlockVectorMAD_Impl { public: struct TInnerContext { double *p_dest; const double *p_block; inline TInnerContext(double *_p_dest, const double *_p_block) :p_dest(_p_dest), p_block(_p_block) {} }; template <const int n_row_height, class CColumnWidth> class CInnerLoop { // not dependent on CBlockSizeList; don't make it an inner class of COuterLoop public: enum { n_column_width = CColumnWidth::n_size }; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_column_width, n_column_width>::_Ty _TyDestMap; typedef typename CUberBlockMatrix::CMakeMatrixRef<n_row_height, n_column_width>::_TyConst _TyBlockMap; public: static inline void Do(TInnerContext t_ctx) { _TyBlockMap fbs_block(t_ctx.p_block); _TyDestMap(t_ctx.p_dest) += fbs_block.transpose().lazyProduct(fbs_block); // mad } }; struct TOuterContext { double *p_dest; const CUberBlockMatrix &r_block_vector; inline TOuterContext(double *_p_dest, const CUberBlockMatrix &_r_block_vector) :p_dest(_p_dest), r_block_vector(_r_block_vector) {} }; template <const int n_column_width, class CBlockSizeList> class COuterLoop { public: static inline void Do(TOuterContext t_ctx) { _ASSERTE(n_column_width == t_ctx.r_block_vector.n_BlockColumn_Column_Num(0)); for(size_t i = 0, n = t_ctx.r_block_vector.n_BlockColumn_Block_Num(0); i < n; ++ i) { CUberBlockMatrix::_TyMatrixXdRef t_block = const_cast<CUberBlockMatrix&>(t_ctx.r_block_vector).t_Block_AtColumn(0, i); // hack - need to cast, const_ref does not expose its pointer to data size_t n_row_height = t_block.rows(); _ASSERTE(n_column_width == t_block.cols()); fbs_ut::CWrap2<CInnerLoop, fbs_ut::CCTSize<n_column_width> >::template In_RowHeight_DecisionTree_Given_ColumnWidth<CBlockSizeList, n_column_width>(int(n_row_height), TInnerContext(t_ctx.p_dest, t_block.data()/*&t_block(0, 0)*/)); // mad } // for each nnz block } }; /*public: template <class CBlockSizeList> static void BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(double *p_dest, const CUberBlockMatrix &r_block_vector) // g++ is unable to reference TOuterLoop from the outside for some reason { fbs_ut::CWrap2<COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>( r_block_vector.n_Column_Num(), TOuterContext(p_dest, r_block_vector)); // decide over vector width }*/ }; template <class CBlockSizeList, class CDestMatrix> inline void BlockVector_PreMultiplyWithSelfTranspose_Add_FBS(CDestMatrix &r_dest, const CUberBlockMatrix &r_block_vector) { _ASSERTE(r_block_vector.n_BlockColumn_Num() == 1); // it is a block column-vector _ASSERTE(r_block_vector.n_Column_Num() == r_dest.rows()); // the number of columns in the block vector matches the size of the destination _ASSERTE(r_dest.rows() == r_dest.cols()); // the output is square _ASSERTE(!(CDestMatrix::Flags & Eigen::RowMajor)); // must be column-major otherwise the conversion to pointer strips data 
//_ASSERTE(CDestMatrix::MaxColsAtCompileTime == Eigen::Dynamic || // CDestMatrix::MaxColsAtCompileTime == r_dest.cols() || // r_dest.cols() == 1); // the stride must be tight or there is a single col and it does not matter _ASSERTE(r_dest.cols() <= 1 || &r_dest(0, 1) == &r_dest(0, 0) + r_dest.rows()); // the stride must be tight or there is a single col and it does not matter // do not zero r_dest //CBlockVectorMAD_Impl::template BlockVector_PreMultiplyWithSelfTranspose_Add_FBS<CBlockSizeList>(r_dest.data(), r_block_vector); fbs_ut::CWrap2<CBlockVectorMAD_Impl::COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>(int(r_block_vector.n_Column_Num()), CBlockVectorMAD_Impl::TOuterContext(r_dest.data(), r_block_vector)); // decide over vector width } inline void Calculate_UpperTriangularTransposeSolve_Bases(CUberBlockMatrix &S_bases, const CUberBlockMatrix &S, /*const*/ cs *p_St, size_t n_column, Eigen::MatrixXd &r_workspace, std::vector<size_t> &r_workspace1) // this is inline, to avoid link conflicts { _ASSERTE(S.b_EqualLayout(S_bases)); // will yield a matrix with the same sparsity structure //S.CopyLayoutTo(S_bases); _ASSERTE(n_column < S.n_BlockColumn_Num()); // make sure the column is inside the matrix _ASSERTE(p_St->m == S.n_BlockRow_Num() && p_St->n == S.n_BlockColumn_Num()); // should be the same matrix _ASSERTE(sizeof(csi) == sizeof(size_t)); r_workspace1.resize(2 * p_St->n); // alloc workspace { //cs *p_St = cs_transpose(p_S, 0); // need p_St /*csi p_col[2] = {0, 1}; csi n_row = 0; cs B; B.m = p_St->m; B.n = 1; B.p = p_col; B.i = &n_row; B.x = 0; B.nzmax = 1; B.nz = -1;*/ // prepare a single entry CSC matrix //Eigen::Matrix<double, Eigen::Dynamic, 6> S_dense_basis(S.n_Row_Num(), 6); // todo - implement matrix solving and try to make this row major // unit basis matrix //for(size_t n_column = 0, n = S.n_BlockColumn_Num(); n_column < n; ++ n_column) { // t_odo - do this in parallel (probably explicit matrix reduction rather than locking) { //size_t n_column = n_column; size_t w = S.n_BlockColumn_Column_Num(n_column); // t_odo - FBS it //_ASSERTE(w == 6); //n_row = n_column; //size_t n_first_dep_col = cs_reach(p_St, &B, 0, (csi*)&r_workspace1.front(), 0); // modifies p_St but then puts it back size_t n_first_dep_col = cs_dfs(n_column, p_St, p_St->n, (csi*)&r_workspace1.front(), (csi*)&r_workspace1.front() + p_St->n, 0); // no need for B // todo - reimplement this directly on block matrices, try to avoid needing the transpose size_t n_dep_col_num = p_St->n - n_first_dep_col; const size_t *p_dep_col = &r_workspace1[n_first_dep_col]; for(size_t j = 0; j < n_dep_col_num; ++ j) CS_MARK(p_St->p, p_dep_col[j]); // restore col. 
pointers after calling cs_dfs // get the list of columns of S that affect block U_Dinv_{n_column, *} r_workspace.resize(S.n_Row_Num(), w); // alloc workspace Eigen::MatrixXd &S_dense_basis = r_workspace; // just rename //Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>, Eigen::Aligned> // S_dense_basis(r_workspace.data(), S.n_Row_Num(), w); // map the workspace as (column-wise) fixed-size matrix S_dense_basis.setZero(); S_dense_basis.middleRows(S_bases.n_BlockColumn_Base(n_column), w).setIdentity(); // create a vector of zeros for(size_t c = 0; c < w; ++ c) { //S.UpperTriangularTranspose_Solve(&U_Dinv_i_permd.col(c)(0), U_Dinv_i_permd.rows(), p_dep_col, n_dep_col_num); S.UpperTriangularTranspose_Solve/*_FBS<SC_BlockSizes>*/(&S_dense_basis.col(c)(0), S_dense_basis.rows(), p_dep_col, n_dep_col_num); } // sparse sparse UTTSolve for(size_t j = 0; j < n_dep_col_num; ++ j) { size_t n_row = p_dep_col[j]; size_t y = S_bases.n_BlockColumn_Base(n_row); size_t h = S_bases.n_BlockColumn_Column_Num(n_row); // those are rows but S_bases is symmetric #ifdef _DEBUG if(j > 0) { const size_t r_prev = p_dep_col[j - 1]; size_t y_prev = S_bases.n_BlockColumn_Base(r_prev); size_t h_prev = S_bases.n_BlockColumn_Column_Num(r_prev); size_t e_prev = y_prev + h_prev; _ASSERTE(S_dense_basis.middleRows(e_prev, y - e_prev).squaredNorm() == 0); // make sure there are zeros between consecutive (nonadjacent) blocks } else if(y > 0) _ASSERTE(S_dense_basis.topRows(y).squaredNorm() == 0); // make sure there are zeros above the first block if(j + 1 == n_dep_col_num) _ASSERTE(S_dense_basis.bottomRows(S_dense_basis.rows() - (y + h)).squaredNorm() == 0); // make sure there are zeros till the end // make sure that there are only zeros in between the elements #endif // _DEBUG //_ASSERTE(h == 6); //_ASSERTE(S_bases.n_BlockColumn_Column_Num(n_row) == 6); // t_odo - FBS it S_bases.t_GetBlock_Log(n_row, n_column, h, w) = S_dense_basis.middleRows(S_bases.n_BlockColumn_Base(n_row), h); // this is transposed (transpose the block as well?): each row is a single basis; this only works if the structure of S is symmetric } // sparse fill the bases matrix } //S_bases.Rasterize("S_bases.tga", 3); // ... 
} } /** * @brief FBS implementation for the upper triangular transpose solve of the sparse bases matrix */ class CUTTSolve_Bases_Impl { public: struct TInnerContext { CUberBlockMatrix &r_dest; const size_t n_column; const Eigen::MatrixXd &r_src; const size_t n_row; inline TInnerContext(CUberBlockMatrix &_r_dest, size_t _n_column, const Eigen::MatrixXd &_r_src, size_t _n_row) :r_dest(_r_dest), n_column(_n_column), r_src(_r_src), n_row(_n_row) {} }; template <const int n_row_height, class CColumnWidth> class CInnerLoop { public: enum { n_column_width = CColumnWidth::n_size }; public: static inline void Do(TInnerContext t_ctx) { _ASSERTE(t_ctx.r_src.rows() == t_ctx.r_dest.n_Row_Num()); _ASSERTE(n_column_width == t_ctx.r_dest.n_BlockColumn_Column_Num(t_ctx.n_column)); Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, n_column_width>, Eigen::Aligned> S_dense_basis(t_ctx.r_src.data(), t_ctx.r_src.rows(), n_column_width); // map the source as (column-wise) fixed-size matrix Eigen::Map<Eigen::Matrix<double, n_row_height, n_column_width> > dest_block(t_ctx.r_dest.p_GetBlock_Log(t_ctx.n_row, t_ctx.n_column, n_row_height, n_column_width, true, false)); dest_block = S_dense_basis.template middleRows<n_row_height>(t_ctx.r_dest.n_BlockColumn_Base(t_ctx.n_row)); } }; struct TOuterContext { CUberBlockMatrix &r_S_bases; const size_t n_column; const CUberBlockMatrix &r_S; Eigen::MatrixXd &r_workspace; const size_t *p_dep_col; const size_t n_dep_num; inline TOuterContext(CUberBlockMatrix &_r_S_bases, size_t _n_column, const CUberBlockMatrix &_r_S, Eigen::MatrixXd &_r_workspace, const size_t *_p_dep_col, size_t _n_dep_num) :r_S_bases(_r_S_bases), n_column(_n_column), r_S(_r_S), r_workspace(_r_workspace), p_dep_col(_p_dep_col), n_dep_num(_n_dep_num) {} }; template <const int n_column_width, class CBlockSizeList> class COuterLoop { public: static inline void Do(TOuterContext t_ctx) { t_ctx.r_workspace.resize(t_ctx.r_S.n_Row_Num(), n_column_width); // alloc workspace Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, n_column_width>, Eigen::Aligned> S_dense_basis(t_ctx.r_workspace.data(), t_ctx.r_S.n_Row_Num(), n_column_width); // map the workspace as (column-wise) fixed-size matrix S_dense_basis.setZero(); S_dense_basis.template middleRows<n_column_width>(t_ctx.r_S.n_BlockColumn_Base(t_ctx.n_column)).setIdentity(); // create a vector of zeros for(size_t c = 0; c < n_column_width; ++ c) { // todo - make a version of UpperTriangularTranspose_Solve_FBS for vectors t_ctx.r_S.UpperTriangularTranspose_Solve_FBS<CBlockSizeList>(&S_dense_basis.col(c)(0), S_dense_basis.rows(), t_ctx.p_dep_col, t_ctx.n_dep_num); } // sparse sparse UTTSolve for(size_t j = 0; j < t_ctx.n_dep_num; ++ j) { size_t n_row = t_ctx.p_dep_col[j]; size_t h = t_ctx.r_S.n_BlockColumn_Column_Num(n_row); // those are rows but S_bases is symmetric #ifdef _DEBUG size_t y = t_ctx.r_S.n_BlockColumn_Base(n_row); if(j > 0) { const size_t r_prev = t_ctx.p_dep_col[j - 1]; size_t y_prev = t_ctx.r_S.n_BlockColumn_Base(r_prev); size_t h_prev = t_ctx.r_S.n_BlockColumn_Column_Num(r_prev); size_t e_prev = y_prev + h_prev; _ASSERTE(S_dense_basis.middleRows(e_prev, y - e_prev).squaredNorm() == 0); // make sure there are zeros between consecutive (nonadjacent) blocks } else if(y > 0) _ASSERTE(S_dense_basis.topRows(y).squaredNorm() == 0); // make sure there are zeros above the first block if(j + 1 == t_ctx.n_dep_num) _ASSERTE(S_dense_basis.bottomRows(S_dense_basis.rows() - (y + h)).squaredNorm() == 0); // make sure there are zeros till the end // make sure that there 
are only zeros in between the elements #endif // _DEBUG //_ASSERTE(h == 6); //_ASSERTE(S_bases.n_BlockColumn_Column_Num(n_row) == 6); // t_odo - FBS it //S_bases.t_GetBlock_Log(n_row, n_column, h, w) = // S_dense_basis.middleRows<6>(S_bases.n_BlockColumn_Base(n_row)); // this is transposed (transpose the block as well?): each row is a single basis; this only works if the structure of S is symmetric fbs_ut::CWrap2<CInnerLoop, fbs_ut::CCTSize<n_column_width> >::template In_RowHeight_DecisionTree_Given_ColumnWidth<CBlockSizeList, n_column_width>(int(h), TInnerContext(t_ctx.r_S_bases, t_ctx.n_column, t_ctx.r_workspace, n_row)); // use SSE to copy stuff around } // sparse fill the bases matrix } }; }; template <class CBlockSizeList> void Calculate_UpperTriangularTransposeSolve_Bases_FBS(CUberBlockMatrix &S_bases, const CUberBlockMatrix &S, /*const*/ cs *p_St, size_t n_column, Eigen::MatrixXd &r_workspace, std::vector<size_t> &r_workspace1) { _ASSERTE(S.b_EqualLayout(S_bases)); // will yield a matrix with the same sparsity structure //S.CopyLayoutTo(S_bases); _ASSERTE(n_column < S.n_BlockColumn_Num()); // make sure the column is inside the matrix _ASSERTE(p_St->m == S.n_BlockRow_Num() && p_St->n == S.n_BlockColumn_Num()); // should be the same matrix _ASSERTE(sizeof(csi) == sizeof(size_t)); r_workspace1.resize(2 * p_St->n); // alloc workspace size_t w = S.n_BlockColumn_Column_Num(n_column); // t_odo - FBS it //_ASSERTE(w == 6); //n_row = n_column; //size_t n_first_dep_col = cs_reach(p_St, &B, 0, (csi*)&r_workspace1.front(), 0); // modifies p_St but then puts it back size_t n_first_dep_col = cs_dfs(n_column, p_St, p_St->n, (csi*)&r_workspace1.front(), (csi*)&r_workspace1.front() + p_St->n, 0); // no need for B // todo - reimplement this directly on block matrices, try to avoid needing the transpose size_t n_dep_col_num = p_St->n - n_first_dep_col; const size_t *p_dep_col = &r_workspace1[n_first_dep_col]; for(size_t j = 0; j < n_dep_col_num; ++ j) CS_MARK(p_St->p, p_dep_col[j]); // restore col. 
pointers after calling cs_dfs // get the list of columns of S that affect block U_Dinv_{n_column, *} // note that this is FBS-independent fbs_ut::CWrap2<CUTTSolve_Bases_Impl::COuterLoop, CBlockSizeList>::template In_ColumnWidth_DecisionTree<CBlockSizeList>(int(w), CUTTSolve_Bases_Impl::TOuterContext(S_bases, n_column, S, r_workspace, p_dep_col, n_dep_col_num)); } } // ~sc_margs_detail template <class _SC_BlockSizes, class _U_BlockSizes, class _V_BlockSizes, class _D_BlockSizes> class CSchurComplement_Marginals { public: typedef _SC_BlockSizes SC_BlockSizes; typedef _SC_BlockSizes A_BlockSizes; // the same typedef _U_BlockSizes U_BlockSizes; typedef _V_BlockSizes V_BlockSizes; typedef _D_BlockSizes D_BlockSizes; typedef typename CUniqueTypelist<typename CConcatTypelist<typename CConcatTypelist<A_BlockSizes, U_BlockSizes>::_TyResult, typename CConcatTypelist<V_BlockSizes, D_BlockSizes>::_TyResult>::_TyResult>::_TyResult Lambda_BlockSizes; protected: bool m_b_verbose; mutable double m_f_time_Dinv_copy; mutable double m_f_time_sp_struct; mutable double m_f_time_S_bases; mutable double m_f_time_lm_inverse; mutable double m_f_time_cam_inverse; mutable double m_f_time_lambda_AMD; mutable double m_f_time_lambda_perm; mutable double m_f_time_lambda_Chol; mutable double m_f_time_lambda_recformula; mutable double m_f_time_lambda_unperm; mutable uint64_t m_n_worst_memory; public: CSchurComplement_Marginals(bool b_verbose = false) :m_b_verbose(b_verbose), m_f_time_Dinv_copy(0), m_f_time_sp_struct(0), m_f_time_S_bases(0), m_f_time_lm_inverse(0), m_f_time_cam_inverse(0), m_f_time_lambda_AMD(0), m_f_time_lambda_perm(0), m_f_time_lambda_Chol(0), m_f_time_lambda_recformula(0), m_f_time_lambda_unperm(0), m_n_worst_memory(0) {} inline bool b_Verbose() const { return m_b_verbose; } void Add_LambdaChol_Time(double f_time_AMD, double f_time_perm, double f_time_Chol) { m_f_time_lambda_AMD += f_time_AMD; m_f_time_lambda_perm += f_time_perm; m_f_time_lambda_Chol += f_time_Chol; } void Dump() const { printf("\trecursive margs took %f sec, out of which:\n", m_f_time_lambda_AMD + m_f_time_lambda_perm + m_f_time_lambda_Chol + m_f_time_lambda_recformula + m_f_time_lambda_unperm); printf("\t\t amd: %f\n", m_f_time_lambda_AMD); printf("\t\t perm: %f\n", m_f_time_lambda_perm); printf("\t\t Chol: %f\n", m_f_time_lambda_Chol); printf("\t\trform: %f\n", m_f_time_lambda_recformula); printf("\t\tunprm: %f\n", m_f_time_lambda_unperm); printf("\tSchur margs took %f sec, out of which:\n", m_f_time_Dinv_copy + m_f_time_sp_struct + m_f_time_S_bases + m_f_time_lm_inverse); printf("\t\tdinit: %f\n", m_f_time_Dinv_copy); printf("\t\tstruc: %f\n", m_f_time_sp_struct); printf("\t\tbases: %f\n", m_f_time_S_bases); printf("\t\t inv: %f\n", m_f_time_lm_inverse); printf("\trecursive inverse of cameras using SC took %f sec\n", m_f_time_cam_inverse); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\tSchur marginals took " PRIsizeB "B memory at most\n", PRIsizeBparams(m_n_worst_memory)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } bool Get_CholeskyLambda(CUberBlockMatrix &R, CMatrixOrdering &lam_ord, const CUberBlockMatrix &lambda) const { CTimer t; CTimerSampler timer(t); lam_ord.p_BlockOrdering(lambda, true); // w.r.t. 
lambda_perm const size_t *p_lam_ord = lam_ord.p_Get_InverseOrdering(); const size_t n_lam_ord_size = lam_ord.n_Ordering_Size(); // ordering for lambda double f_lambda_amd_time = 0; timer.Accum_DiffSample(f_lambda_amd_time); m_f_time_lambda_AMD += f_lambda_amd_time; CUberBlockMatrix lambda_amd; lambda.Permute_UpperTriangular_To(lambda_amd, p_lam_ord, n_lam_ord_size, true); double f_lambda_perm_time = 0; timer.Accum_DiffSample(f_lambda_perm_time); m_f_time_lambda_perm += f_lambda_perm_time; /*typedef CConcatTypelist<CConcatTypelist<SC_BlockSizes, U_BlockSizes>::_TyResult, CConcatTypelist<V_BlockSizes, D_BlockSizes>::_TyResult>::_TyResult Lambda_BlockSizes;*/ if(!R.CholeskyOf_FBS<Lambda_BlockSizes>(lambda_amd)) { fprintf(stderr, "error: got not pos def when factorizing lambda\n"); return false; } double f_lambda_chol_time = 0; timer.Accum_DiffSample(f_lambda_chol_time); m_f_time_lambda_Chol += f_lambda_chol_time; if(m_b_verbose) { printf("\tCholesky of lambda took %f sec, out of which:\n", f_lambda_chol_time + f_lambda_perm_time + f_lambda_amd_time); printf("\t\t amd: %f\n", f_lambda_amd_time); printf("\t\t perm: %f\n", f_lambda_perm_time); printf("\t\t Chol: %f, " PRIsize " elem. nnz (needs %.2f MB)\n", f_lambda_chol_time, R.n_NonZero_Num(), R.n_Allocation_Size_NoLastPage() / 1048576.0); } return true; } /** * @brief calculates block diagonal of the covariance matrix using the recursive formula * * @param[out] margs_recursive is filled with the marginals upon return * @param[in] R is Cholesky factorization of the system matrix * @param[in] lam_ord is reference to the AMD ordering that was used for R * * @note This function throws std::bad_alloc. */ void Recursive_Marginals(//CUberBlockMatrix &cam_cov_rec, CUberBlockMatrix &lm_cov_rec, CUberBlockMatrix &margs_recursive, const CUberBlockMatrix &R, const CMatrixOrdering &lam_ord) const // throw(std::bad_alloc) { CTimer t; CTimerSampler timer(t); double f_lambda_recformula_time = 0; { CUberBlockMatrix margs_ordered; CMarginals::Calculate_DenseMarginals_Recurrent_FBS<Lambda_BlockSizes>(margs_ordered, R, lam_ord, mpart_Diagonal, false); // calculate the thing timer.Accum_DiffSample(f_lambda_recformula_time); m_f_time_lambda_recformula += f_lambda_recformula_time; margs_ordered.Permute_UpperTriangular_To(margs_recursive, lam_ord.p_Get_Ordering(), lam_ord.n_Ordering_Size(), false); // no share! the original will be deleted } double f_lambda_unperm_time = 0; timer.Accum_DiffSample(f_lambda_unperm_time); m_f_time_lambda_unperm += f_lambda_unperm_time; if(m_b_verbose) { printf("\trecursive margs took %f sec, out of which:\n", f_lambda_unperm_time + f_lambda_recformula_time /*+ f_lambda_chol_time + f_lambda_perm_time + f_lambda_amd_time*/); /*printf("\t amd: %.3f\n", f_lambda_amd_time); printf("\t perm: %.3f\n", f_lambda_perm_time); printf("\t Chol: %.3f, " PRIsize " elem.
nnz (needs %.2f MB)\n", f_lambda_chol_time, R.n_NonZero_Num(), R.n_Allocation_Size_NoLastPage() / 1048576.0);*/ printf("\t\trform: %f\n", f_lambda_recformula_time); printf("\t\tunprm: %f\n", f_lambda_unperm_time); } /*margs_recursive.SliceTo(cam_cov_rec, 0, n_matrix_cut, 0, n_matrix_cut, true); margs_recursive.SliceTo(lm_cov_rec, n_matrix_cut, n_size, n_matrix_cut, n_size, true); // get the submatrices*/ } /** * @brief calculates block diagonal of the covariance matrix from a Schur-complemented system * * @param[out] sp_cam_inv is filled with camera marginals upon return * @param[in] b_do_cam_marginals is camera marginals flag (if set, sp_cam_inv is filled, otherwise it is empty) * @param[out] sp_lm_inv is filled with landmark marginals upon return * @param[in] S is Cholesky factorization of the Schur complement * @param[in] SC_ord is reference to the fill-reducing ordering that was used to factorize the Schur * complement (this is *not* the ordering that separates landmarks and poses) * @param[in] Dinv is inverse of the (diagonal) landmark matrix * @param[in] U_Dinv is a product of the top off-diagonal block of the Schur complement and Dinv * @param[in] b_negative_Dinv_UDinv is negative flag (set if your Dinv and UDinv are negative) * * @note This function throws std::bad_alloc. */ void Schur_Marginals(CUberBlockMatrix &sp_cam_inv, bool b_do_cam_marginals, CUberBlockMatrix &sp_lm_inv, const CUberBlockMatrix &S, const CMatrixOrdering &SC_ord, const CUberBlockMatrix &Dinv, const CUberBlockMatrix &U_Dinv, bool b_negative_Dinv_UDinv = false) const // throw(std::bad_alloc) { #ifdef __BA_MARGS_DO_MEMORY_PROFILING static bool b_warned = false; if(!b_warned) { fprintf(stderr, "warning: __BA_MARGS_DO_MEMORY_PROFILING defined!
" "do not use this run for timing measurements\n"); b_warned = true; } #endif // __BA_MARGS_DO_MEMORY_PROFILING const size_t *p_SC_ord = SC_ord.p_Get_InverseOrdering(); const size_t n_SC_ord_size = SC_ord.n_Ordering_Size(); _ASSERTE(n_SC_ord_size == S.n_BlockColumn_Num()); // if this fails then the ordering is likely for the entire lambda; this is just the // ordering to get chol(shur(lambda)) CTimer t; CTimerSampler timer(t); sp_lm_inv = Dinv; // start with that if(b_negative_Dinv_UDinv) sp_lm_inv.Scale_FBS_Parallel<D_BlockSizes>(-1); // this needs to be negated; the rest of the product is squared so it cancels out double f_diag_init_time = 0; timer.Accum_DiffSample(f_diag_init_time); m_f_time_Dinv_copy += f_diag_init_time; cs *p_S = S.p_BlockStructure_to_Sparse(); CUberBlockMatrix U_Dinv_perm; U_Dinv.PermuteTo(U_Dinv_perm, p_SC_ord, n_SC_ord_size, true, false, true); // get a permuted view of U_Dinv (can be shared among the threads) cs *p_B = U_Dinv_perm.p_BlockStructure_to_Sparse(); // grab its structure (can be shared among the threads) double f_struct_time = 0; timer.Accum_DiffSample(f_struct_time); m_f_time_sp_struct += f_struct_time; CUberBlockMatrix S_bases; std::vector<CUberBlockMatrix> S_bases_thr_list; double f_parallel_S_bases_time = 0; #ifdef __BA_MARGS_DO_MEMORY_PROFILING uint64_t n_memory = 0; #endif // __BA_MARGS_DO_MEMORY_PROFILING #pragma omp parallel { #ifdef _OPENMP const size_t n_thread_num = omp_get_num_threads(); const size_t n_thread_id = omp_get_thread_num(); #else // _OPENMP static const size_t n_thread_num = 1; static const size_t n_thread_id = 0; #endif // _OPENMP #pragma omp master { S_bases_thr_list.resize(n_thread_num); } #pragma omp barrier // alloc partials in thread 0 std::vector<size_t> workspace(n_SC_ord_size * 2); // alloc thread-private workspace for cs_reach() cs *p_St_thr = cs_transpose(p_S, 0); // need a private copy of p_St for each thread { CUberBlockMatrix &S_bases_thr = S_bases_thr_list[n_thread_id]; // rename S.CopyLayoutTo(S_bases_thr); Eigen::MatrixXd/*<double, Eigen::Dynamic, 6>*/ S_dense_basis;//(S.n_Row_Num(), 6); // todo - implement matrix solving and try to make this row major // unit basis matrix const size_t n = S.n_BlockColumn_Num(); _ASSERTE(n <= INT_MAX); const int _n = int(n); #pragma omp for schedule(dynamic, 1) // t_odo - dynamic schedule? 
each column will likely have a different cost (todo - build histograms) for(int i = 0; i < _n; ++ i) { S_dense_basis.resize(S.n_Row_Num(), S.n_BlockColumn_Column_Num(i)); // handle different block sizes sc_margs_detail::Calculate_UpperTriangularTransposeSolve_Bases_FBS<SC_BlockSizes>(S_bases_thr, S, p_St_thr, i, S_dense_basis, workspace); // use the nice function instead } } #pragma omp barrier // wait for all the threads to compute their bases #pragma omp master { S_bases.Swap(S_bases_thr_list.front()); // start with 0 for(size_t i = 1, n = n_thread_num; i < n; ++ i) S_bases_thr_list[i].AddTo(S_bases); // no need for FBS, no two blocks will overlap // simple serial reduction in thread 0, could do a parallel one std::vector<CUberBlockMatrix> empty; S_bases_thr_list.swap(empty); timer.Accum_DiffSample(f_parallel_S_bases_time); m_f_time_S_bases += f_parallel_S_bases_time; } // reduce the bases #pragma omp barrier // synchronize the threads before continuing Eigen::Matrix3d lm_i_cov; Eigen::Matrix<double, Eigen::Dynamic, 3> U_Dinv_i_permd(U_Dinv.n_Row_Num(), 3); // those can stay allocated, the size does not change throughout the algorithm const size_t n = Dinv.n_BlockColumn_Num(); _ASSERTE(n <= INT_MAX); const int _n = int(n); #pragma omp for schedule(dynamic, 1) // t_odo - dynamic schedule? each column will likely have a different cost (todo - build histograms) for(int i = 0; i < _n; ++ i) { CUberBlockMatrix U_Dinv_i_perm; U_Dinv_perm.SliceTo(U_Dinv_i_perm, 0, U_Dinv_perm.n_BlockRow_Num(), i, i + 1, true); // grab a single column of U_Dinv_perm (via reference) CUberBlockMatrix SinvT_U_Dinv_i_perm; SinvT_U_Dinv_i_perm.ProductOf_FBS<SC_BlockSizes, U_BlockSizes>(S_bases, U_Dinv_i_perm); // gets a sparse matrix, the size of U_Dinv_i_perm #ifdef __BA_MARGS_DO_MEMORY_PROFILING size_t n_mem_col = SinvT_U_Dinv_i_perm.n_Allocation_Size_NoLastPage(); #pragma omp critical { n_memory = std::max(uint64_t(n_mem_col), n_memory); } #endif // __BA_MARGS_DO_MEMORY_PROFILING CUberBlockMatrix::_TyMatrixXdRef t_lminv_ii = sp_lm_inv.t_GetBlock_Log(i, i); sc_margs_detail::BlockVector_PreMultiplyWithSelfTranspose_Add_FBS<U_BlockSizes>(t_lminv_ii, SinvT_U_Dinv_i_perm); } cs_spfree(p_St_thr); } // calculate block diagonal covariances of only the landmarks #ifdef __BA_MARGS_DO_MEMORY_PROFILING n_memory += CSparseMatrixMemInfo::n_Allocation_Size(p_S) + CSparseMatrixMemInfo::n_Allocation_Size(p_B); //n_memory += sp_lm_inv.n_Allocation_Size_NoLastPage(); // the marginals themselves // not! 
#endif // __BA_MARGS_DO_MEMORY_PROFILING cs_spfree(p_S); cs_spfree(p_B); double f_inverse_time = 0, f_lminv_time = 0; timer.Accum_DiffSample(f_inverse_time); m_f_time_lm_inverse += f_inverse_time; f_lminv_time = f_struct_time + f_diag_init_time + f_parallel_S_bases_time + f_inverse_time; if(m_b_verbose) { printf("\tSchur margs took %f sec, out of which:\n", f_lminv_time); printf("\t\tstruc: %f\n", f_struct_time); printf("\t\tbases: %f (%.2f %% sparsity, S has %.2f %%, needed %.2f MB)\n", f_parallel_S_bases_time, 100 * double(S_bases.n_Block_Num() * 6 * 6) / (S_bases.n_BlockColumn_Num() * S_bases.n_BlockColumn_Num() * 6 * 6), 100 * double(S.n_Block_Num() * 6 * 6) / (S.n_BlockColumn_Num() * S.n_BlockColumn_Num() * 6 * 6), S_bases.n_Allocation_Size_NoLastPage() / 1048576.0); printf("\t\tdinit: %f\n", f_diag_init_time); printf("\t\t inv: %f\n", f_inverse_time); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\tSchur margs took " PRIsizeB "B of memory\n", PRIsizeBparams(n_memory)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } double f_stats_time = 0; timer.Accum_DiffSample(f_stats_time); #ifdef __BA_MARGS_DO_MEMORY_PROFILING uint64_t n_memory_cams = 0; #endif // __BA_MARGS_DO_MEMORY_PROFILING if(b_do_cam_marginals) { CUberBlockMatrix &rcs_cov = sp_cam_inv; // just rename { CUberBlockMatrix margs_ordered; CMarginals::Calculate_DenseMarginals_Recurrent_FBS<SC_BlockSizes>(margs_ordered, S, SC_ord, mpart_Diagonal, false); margs_ordered.Permute_UpperTriangular_To(rcs_cov, SC_ord.p_Get_Ordering(), SC_ord.n_Ordering_Size(), false); // no share! the original will be deleted } double f_rcs_inverse_time = 0; timer.Accum_DiffSample(f_rcs_inverse_time); m_f_time_cam_inverse += f_rcs_inverse_time; #ifdef __BA_MARGS_DO_MEMORY_PROFILING n_memory_cams = rcs_cov.n_Allocation_Size_NoLastPage(); // !! compute even if not in verbose for(size_t i = 0, n = rcs_cov.n_BlockColumn_Num(); i < n; ++ i) { size_t n_dof = rcs_cov.n_BlockColumn_Column_Num(i); _ASSERTE(n_memory_cams >= n_dof * n_dof * sizeof(double)); // make sure the below line does not underflow n_memory_cams -= n_dof * n_dof * sizeof(double); } // do not count the size of marginals we're trying to return! just count the unwanted off-diagonals #endif // __BA_MARGS_DO_MEMORY_PROFILING if(m_b_verbose) { printf("\trecursive inverse of camera SC took %f sec (recovered " PRIsize " blocks)\n", f_rcs_inverse_time, rcs_cov.n_Block_Num()); #ifdef __BA_MARGS_DO_MEMORY_PROFILING printf("\trecursive inverse of camera SC takes " PRIsizeB "B of memory\n", PRIsizeBparams(n_memory_cams)); #endif // __BA_MARGS_DO_MEMORY_PROFILING } } else sp_cam_inv.Clear(); #ifdef __BA_MARGS_DO_MEMORY_PROFILING m_n_worst_memory = std::max(m_n_worst_memory, n_memory + n_memory_cams); #endif // __BA_MARGS_DO_MEMORY_PROFILING } }; #endif // !__BA_MARGINALS_INCLUDED
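The landmark branch of Schur_Marginals above recovers each diagonal covariance block as Sigma_lm(i,i) = Dinv(i,i) + V^T * V, where V is block column i of S^-T * (U * Dinv) and S is the upper-triangular Cholesky factor of the camera Schur complement. Below is a minimal dense Eigen sketch of that per-landmark accumulation (the role played by BlockVector_PreMultiplyWithSelfTranspose_Add_FBS); the sizes and random fill are illustrative assumptions, not values taken from the header.

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // V stands in for one permuted block column of S^-T * (U * Dinv);
    // 12 rows and a 3x3 landmark block are arbitrary illustration sizes.
    Eigen::MatrixXd V = Eigen::MatrixXd::Random(12, 3);
    Eigen::Matrix3d t_lminv_ii = Eigen::Matrix3d::Identity(); // plays the role of the Dinv(i,i) block

    t_lminv_ii += V.transpose() * V; // the "pre-multiply with self transpose, add" step

    std::cout << t_lminv_ii << std::endl; // i-th diagonal block of the landmark marginals
    return 0;
}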
GB_unaryop__ainv_int64_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int64_int16 // op(A') function: GB_tran__ainv_int64_int16 // C type: int64_t // A type: int16_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int64_int16 ( int64_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int64_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int64_int16 // op(A') function: GB_tran__ainv_int64_int16 // C type: int64_t // A type: int16_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int64_int16 ( int64_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int64_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int64_int16 // op(A') function: GB_tran__ainv_int64_int16 // C type: int64_t // A type: int16_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int64_int16 ( int64_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int64_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
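All three variants of this generated file share one scalar kernel; unfolding GB_CAST_OP shows that GB_unop__ainv_int64_int16 is a cast-then-negate loop. A hypothetical standalone rendering of the expanded body, for reference only:

#include <stdint.h>

// Expanded form of GB_CAST_OP for ainv_int64_int16: GB_GETA reads the
// int16_t entry, GB_CASTING widens it to int64_t, GB_OP negates it.
static void ainv_int64_int16_expanded(int64_t *Cx, const int16_t *Ax, int64_t anz)
{
    for (int64_t p = 0; p < anz; p++)
    {
        int16_t aij = Ax[p];       // GB_GETA (aij, Ax, pA)
        int64_t x = (int64_t) aij; // GB_CASTING (x, aij)
        Cx[p] = -x;                // GB_OP (GB_CX (pC), x)
    }
}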
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-30,32)),ceild(3*t1-62,64)),ceild(24*t2-Nz-243,256)),ceild(8*t3-Ny-243,256));t4<=min(min(min(min(floord(4*Nt+Nx-9,256),floord(12*t1+Nx+15,256)),floord(24*t2+Nx+11,256)),floord(8*t3+Nx-5,256)),floord(24*t1-24*t2+Nz+Nx+13,256));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),64*t4+62);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library.
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 3); t1++) { lbp = max(ceild(t1, 2), ceild(6 * t1 - Nt + 2, 6)); ubp = min(floord(4 * Nt + Nz - 9, 24), floord(12 * t1 + Nz + 6, 24)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(3 * t1, 2)), ceild(24 * t2 - Nz + 5, 8)), 3 * t1 - 3 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(12 * t1 + Ny + 15, 8)), floord(24 * t2 + Ny + 11, 8)), floord(24 * t1 - 24 * t2 + Nz + Ny + 13, 8)); t3++) { for (t4 = max(max(max(max(0, ceild(3 * t1 - 3 * t2 - 30, 32)), ceild(3 * t1 - 62, 64)), ceild(24 * t2 - Nz - 243, 256)), ceild(8 * t3 - Ny - 243, 256)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 256), floord(12 * t1 + Nx + 15, 256)), floord(24 * t2 + Nx + 11, 256)), floord(8 * t3 + Nx - 5, 256)), floord(24 * t1 - 24 * t2 + Nz + Nx + 13, 256)); t4++) { for (t5 = max(max(max(max(max(0, ceild(24 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(256 * t4 - Nx + 5, 4)), 3 * t1), 6 * t1 - 6 * t2 + 1); t5 <= min(min(min(min(min(floord(24 * t1 - 24 * t2 + Nz + 18, 4), 2 * t3), Nt - 1), 3 * t1 + 5), 6 * t2 + 4), 64 * t4 + 62); t5++) { for (t6 = max(max(24 * t2, 4 * t5 + 4), -24 * t1 + 24 * t2 + 8 * t5 - 23); t6 <= min(min(24 * t2 + 23, -24 * t1 + 24 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(256 * t4, 4 * t5 + 4); ubv = min(256 * t4 + 255, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 
2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library.
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 3); t1++) { lbp = max(ceild(t1, 2), ceild(6 * t1 - Nt + 2, 6)); ubp = min(floord(4 * Nt + Nz - 9, 24), floord(12 * t1 + Nz + 6, 24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(0, ceild(3 * t1, 2)), ceild(24 * t2 - Nz + 5, 8)), 3 * t1 - 3 * t2 + 1); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 8), floord(12 * t1 + Ny + 15, 8)), floord(24 * t2 + Ny + 11, 8)), floord(24 * t1 - 24 * t2 + Nz + Ny + 13, 8)); t3++) { for (t4 = max(max(max(max(0, ceild(3 * t1 - 3 * t2 - 30, 32)), ceild(3 * t1 - 62, 64)), ceild(24 * t2 - Nz - 243, 256)), ceild(8 * t3 - Ny - 243, 256)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 256), floord(12 * t1 + Nx + 15, 256)), floord(24 * t2 + Nx + 11, 256)), floord(8 * t3 + Nx - 5, 256)), floord(24 * t1 - 24 * t2 + Nz + Nx + 13, 256)); t4++) { for (t5 = max(max(max(max(max(0, ceild(24 * t2 - Nz + 5, 4)), ceild(8 * t3 - Ny + 5, 4)), ceild(256 * t4 - Nx + 5, 4)), 3 * t1), 6 * t1 - 6 * t2 + 1); t5 <= min(min(min(min(min(floord(24 * t1 - 24 * t2 + Nz + 18, 4), 2 * t3), Nt - 1), 3 * t1 + 5), 6 * t2 + 4), 64 * t4 + 62); t5++) { for (t6 = max(max(24 * t2, 4 * t5 + 4), -24 * t1 + 24 * t2 + 8 * t5 - 23); t6 <= min(min(24 * t2 + 23, -24 * t1 + 24 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(8 * t3, 4 * t5 + 4); t7 <= min(8 * t3 + 7, 4 * t5 + Ny - 5); t7++) { lbv = max(256 * t4, 4 * t5 + 4); ubv = min(256 * t4 + 255, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + 
(coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
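For orientation, the time-tiled CLooG loop nest above computes the same update as this untiled, double-buffered sweep; a minimal sketch distilled from the generated statement body (loop bounds and coef numbering read off the tiled code), not part of the benchmark source:

/* Untiled equivalent of the tiled stencil: radius-4, axis-symmetric,
 * variable-coefficient 25-point update with ping-pong buffering on A[t % 2].
 * coef[3r-2] / coef[3r-1] / coef[3r] hold the z / y / x pair weights at offset r. */
for (t = 0; t < Nt; t++)
    for (i = 4; i < Nz - 4; i++)
        for (j = 4; j < Ny - 4; j++)
            for (k = 4; k < Nx - 4; k++) {
                double s = coef[0][i][j][k] * A[t % 2][i][j][k];
                for (int r = 1; r <= 4; r++) {
                    s += coef[3 * r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
                    s += coef[3 * r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
                    s += coef[3 * r][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
                }
                A[(t + 1) % 2][i][j][k] = s;
            }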
weightedNorm2Many.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(weightedNorm2Many)(const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) reduction(+:wa2) #endif for(int fld=0;fld<Nfields;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i]; wa2 += ai*ai*wi; } } cpu_wa[0] = wa2; }
extern "C" void FUNC(weightedNorm2Many) (const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa) { dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #endif for (int fld = 0; fld < Nfields; fld++) { for (int i = 0; i < N; ++i) { const dlong id = i + fld * offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i]; wa2 += ai * ai * wi; } } cpu_wa[0] = wa2; }
extern "C" void FUNC(weightedNorm2Many) (const dlong & Nblocks, const dlong & N, const dlong & Nfields, const dlong & offset, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa) { dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for collapse(2) reduction(+:wa2) #endif for (int fld = 0; fld < Nfields; fld++) { for (int i = 0; i < N; ++i) { const dlong id = i + fld * offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i]; wa2 += ai * ai * wi; } } cpu_wa[0] = wa2; }
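A minimal standalone driver for the reduction performed by the kernel above (assuming, for illustration only, dlong = int, dfloat = double, and offset = N; the FUNC name-mangling macro and nekRS build setup are omitted). It inlines the same weighted sum-of-squares over all fields and takes the square root on the host:

#include <math.h>
#include <stdio.h>

typedef int dlong;     /* assumed integer type  */
typedef double dfloat; /* assumed scalar type   */

int main(void) {
    enum { N = 4, NFIELDS = 2 };
    dfloat w[N] = {1, 1, 1, 1};                       /* per-point weights        */
    dfloat a[NFIELDS * N] = {1, 2, 3, 4, 5, 6, 7, 8}; /* two fields, offset = N   */
    dfloat wa2 = 0;
    for (int fld = 0; fld < NFIELDS; fld++)           /* same reduction as above  */
        for (int i = 0; i < N; ++i)
            wa2 += a[i + fld * N] * a[i + fld * N] * w[i];
    printf("||a||_w = %g\n", sqrt(wa2));              /* weighted 2-norm, all fields */
    return 0;
}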
deconvolution_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } __m128 _sum = _mm_setzero_ps(); const float* kptr = weight_data_packed.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float* sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; __m128 _val = _mm_load_ps(sptr); __m128 _w = _mm_load_ps(kptr + k * 4); _sum = _mm_comp_fmadd_ps(_val, _w, _sum); } } kptr += maxk * 4; } sum += _mm_reduce_add_ps(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[0] = sum; outptr++; } } } }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_packed, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float *bias_data_ptr = bias_data; // num_output for (int p = 0; p < outch; p++) { float *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } __m128 _sum = _mm_setzero_ps(); const float *kptr = weight_data_packed.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float *sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; __m128 _val = _mm_load_ps(sptr); __m128 _w = _mm_load_ps(kptr + k * 4); _sum = _mm_comp_fmadd_ps(_val, _w, _sum); } } kptr += maxk * 4; } sum += _mm_reduce_add_ps(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[0] = sum; outptr++; } } } }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_sse(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_packed, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float *bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } __m128 _sum = _mm_setzero_ps(); const float *kptr = weight_data_packed.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float *sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; __m128 _val = _mm_load_ps(sptr); __m128 _w = _mm_load_ps(kptr + k * 4); _sum = _mm_comp_fmadd_ps(_val, _w, _sum); } } kptr += maxk * 4; } sum += _mm_reduce_add_ps(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[0] = sum; outptr++; } } } }
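The packed SSE body above is easier to follow in scalar form. This is a sketch of the gather-style deconvolution it implements for a single input channel, with hypothetical plain arrays in, ker, and out standing in for the Mat accessors:

/* Scalar sketch of deconvolution as a gather: each output pixel (i, j)
 * collects the input samples that a stride-s, dilation-d transposed
 * convolution would scatter onto it; a tap contributes only when the
 * stride divides the back-projected coordinate evenly. */
void deconv_gather(const float *in, int w, int h, const float *ker,
                   int kw, int kh, int dw, int dh, int sw, int sh,
                   float *out, int outw, int outh, float bias) {
    const int ew = dw * (kw - 1) + 1; /* kernel extents, as in the code above */
    const int eh = dh * (kh - 1) + 1;
    for (int i = 0; i < outh; i++)
        for (int j = 0; j < outw; j++) {
            float sum = bias;
            for (int y = 0; y < kh; y++) {
                int sys = i + y * dh - (eh - 1);
                if (sys < 0 || sys % sh != 0) continue; /* off-grid row */
                int sy = sys / sh;
                if (sy >= h) continue;
                for (int x = 0; x < kw; x++) {
                    int sxs = j + x * dw - (ew - 1);
                    if (sxs < 0 || sxs % sw != 0) continue; /* off-grid column */
                    int sx = sxs / sw;
                    if (sx >= w) continue;
                    sum += in[sy * w + sx] * ker[y * kw + x];
                }
            }
            out[i * outw + j] = sum;
        }
}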
cg.c
struct __sFILEX ; int printf(const char *restrict , ...); void exit(int ); extern double fabs(double ); extern double pow(double , double ); extern double sqrt(double ); typedef int boolean; extern double randlc(double *, double ); extern void timer_clear(int ); extern void timer_start(int ); extern void timer_stop(int ); extern double timer_read(int ); extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand); static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; static int colidx[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static int rowstr[1400 + 1 + 1]; static int iv[2 * 1400 + 1 + 1]; static int arow[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static int acol[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double v[1400 + 1 + 1]; static double aelt[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double a[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double x[1400 + 2 + 1]; static double z[1400 + 2 + 1]; static double p[1400 + 2 + 1]; static double q[1400 + 2 + 1]; static double r[1400 + 2 + 1]; static double amult; static double tran; static void conj_grad(int colidx[], int rowstr[] , double x[] , double z[] , double a[] , double p[] , double q[] , double r[] , double *rnorm); static void makea(int n, int nz , double a[] , int colidx[] , int rowstr[] , int nonzer , int firstrow , int lastrow , int firstcol , int lastcol , double rcond , int arow[] , int acol[] , double aelt[] , double v[] , int iv[] , double shift); static void sparse(double a[], int colidx[] , int rowstr[] , int n , int arow[] , int acol[] , double aelt[] , int firstrow , int lastrow , double x[] , boolean mark[] , int nzloc[] , int nnza); static void sprnvc(int n, int nz , double v[] , int iv[] , int nzloc[] , int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[] , int iv[] , int *nzv , int i , double val); static int callcount = 0; int main(int argc, char **argv) { int i; int j; int k; int it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t; double mflops; char class; boolean verified; double zeta_verify_value; double epsilon; firstrow = 1; lastrow = 1400; firstcol = 1; lastcol = 1400; int _imopVarPre143; int _imopVarPre144; int _imopVarPre145; _imopVarPre143 = 1400 == 1400; if (_imopVarPre143) { _imopVarPre144 = 7 == 7; if (_imopVarPre144) { _imopVarPre145 = 15 == 15; if (_imopVarPre145) { _imopVarPre145 = 10.0 == 10.0; } _imopVarPre144 = _imopVarPre145; } _imopVarPre143 = _imopVarPre144; } if (_imopVarPre143) { class = 'S'; zeta_verify_value = 8.5971775078648; } else { int _imopVarPre149; int _imopVarPre150; int _imopVarPre151; _imopVarPre149 = 1400 == 7000; if (_imopVarPre149) { _imopVarPre150 = 7 == 8; if (_imopVarPre150) { _imopVarPre151 = 15 == 15; if (_imopVarPre151) { _imopVarPre151 = 10.0 == 12.0; } _imopVarPre150 = _imopVarPre151; } _imopVarPre149 = _imopVarPre150; } if (_imopVarPre149) { class = 'W'; zeta_verify_value = 10.362595087124; } else { int _imopVarPre155; int _imopVarPre156; int _imopVarPre157; _imopVarPre155 = 1400 == 14000; if (_imopVarPre155) { _imopVarPre156 = 7 == 11; if (_imopVarPre156) { _imopVarPre157 = 15 == 15; if (_imopVarPre157) { _imopVarPre157 = 10.0 == 20.0; } 
_imopVarPre156 = _imopVarPre157; } _imopVarPre155 = _imopVarPre156; } if (_imopVarPre155) { class = 'A'; zeta_verify_value = 17.130235054029; } else { int _imopVarPre161; int _imopVarPre162; int _imopVarPre163; _imopVarPre161 = 1400 == 75000; if (_imopVarPre161) { _imopVarPre162 = 7 == 13; if (_imopVarPre162) { _imopVarPre163 = 15 == 75; if (_imopVarPre163) { _imopVarPre163 = 10.0 == 60.0; } _imopVarPre162 = _imopVarPre163; } _imopVarPre161 = _imopVarPre162; } if (_imopVarPre161) { class = 'B'; zeta_verify_value = 22.712745482631; } else { int _imopVarPre167; int _imopVarPre168; int _imopVarPre169; _imopVarPre167 = 1400 == 150000; if (_imopVarPre167) { _imopVarPre168 = 7 == 15; if (_imopVarPre168) { _imopVarPre169 = 15 == 75; if (_imopVarPre169) { _imopVarPre169 = 10.0 == 110.0; } _imopVarPre168 = _imopVarPre169; } _imopVarPre167 = _imopVarPre168; } if (_imopVarPre167) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } } } } } printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", 1400); printf(" Iterations: %5d\n", 15); naa = 1400; nzz = 1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2); tran = 314159265.0; amult = 1220703125.0; double *_imopVarPre171; double _imopVarPre172; _imopVarPre171 = &tran; _imopVarPre172 = randlc(_imopVarPre171, amult); zeta = _imopVarPre172; int n; int nz; int nonzer; double rcond; double shift; n = naa; nz = nzz; nonzer = 7; rcond = 1.0e-1; shift = 10.0; int i_imopVarPre77; int nnza; int iouter; int ivelt; int ivelt1; int irow; int nzv; double size; double ratio; double scale; int jcol; size = 1.0; double _imopVarPre189; double _imopVarPre190; _imopVarPre189 = (1.0 / (double) n); _imopVarPre190 = pow(rcond, _imopVarPre189); ratio = _imopVarPre190; nnza = 0; #pragma omp parallel default(shared) private(i_imopVarPre77) { #pragma omp for nowait for (i_imopVarPre77 = 1; i_imopVarPre77 <= n; i_imopVarPre77++) { colidx[n + i_imopVarPre77] = 0; } } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; int *_imopVarPre193; int *_imopVarPre194; _imopVarPre193 = &(colidx[n]); _imopVarPre194 = &(colidx[0]); sprnvc(n, nzv, v, iv, _imopVarPre194, _imopVarPre193); int *_imopVarPre196; _imopVarPre196 = &nzv; vecset(n, v, iv, _imopVarPre196, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; int _imopVarPre198; _imopVarPre198 = jcol >= firstcol; if (_imopVarPre198) { _imopVarPre198 = jcol <= lastcol; } if (_imopVarPre198) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; int _imopVarPre200; _imopVarPre200 = irow >= firstrow; if (_imopVarPre200) { _imopVarPre200 = irow <= lastrow; } if (_imopVarPre200) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } for (i_imopVarPre77 = firstrow; i_imopVarPre77 <= lastrow; i_imopVarPre77++) { int _imopVarPre202; _imopVarPre202 = i_imopVarPre77 >= firstcol; if (_imopVarPre202) { _imopVarPre202 = i_imopVarPre77 <= lastcol; } if (_imopVarPre202) { iouter = n + i_imopVarPre77; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i_imopVarPre77; arow[nnza] = i_imopVarPre77; aelt[nnza] = rcond - shift; } } int *_imopVarPre205; int 
*_imopVarPre206; _imopVarPre205 = &(iv[n]); _imopVarPre206 = &(iv[0]); double *x_imopVarPre75; int *mark; int *nzloc; x_imopVarPre75 = v; mark = _imopVarPre206; nzloc = _imopVarPre205; int nrows; int i_imopVarPre76; int j_imopVarPre78; int jajp1; int nza; int k_imopVarPre79; int nzrow; double xi; nrows = lastrow - firstrow + 1; #pragma omp parallel default(shared) private(j_imopVarPre78) { #pragma omp for nowait for (j_imopVarPre78 = 1; j_imopVarPre78 <= n; j_imopVarPre78++) { rowstr[j_imopVarPre78] = 0; mark[j_imopVarPre78] = 0; } } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j_imopVarPre78 = (arow[nza] - firstrow + 1) + 1; rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + 1; } rowstr[1] = 1; for (j_imopVarPre78 = 2; j_imopVarPre78 <= nrows + 1; j_imopVarPre78++) { rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + rowstr[j_imopVarPre78 - 1]; } #pragma omp parallel default(shared) private(k_imopVarPre79, j_imopVarPre78) { #pragma omp for nowait for (j_imopVarPre78 = 0; j_imopVarPre78 <= nrows - 1; j_imopVarPre78++) { for (k_imopVarPre79 = rowstr[j_imopVarPre78]; k_imopVarPre79 <= rowstr[j_imopVarPre78 + 1] - 1; k_imopVarPre79++) { a[k_imopVarPre79] = 0.0; } } } for (nza = 1; nza <= nnza; nza++) { j_imopVarPre78 = arow[nza] - firstrow + 1; k_imopVarPre79 = rowstr[j_imopVarPre78]; a[k_imopVarPre79] = aelt[nza]; colidx[k_imopVarPre79] = acol[nza]; rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + 1; } for (j_imopVarPre78 = nrows; j_imopVarPre78 >= 1; j_imopVarPre78--) { rowstr[j_imopVarPre78 + 1] = rowstr[j_imopVarPre78]; } rowstr[1] = 1; nza = 0; #pragma omp parallel default(shared) private(i_imopVarPre76) { #pragma omp for nowait for (i_imopVarPre76 = 1; i_imopVarPre76 <= n; i_imopVarPre76++) { x_imopVarPre75[i_imopVarPre76] = 0.0; mark[i_imopVarPre76] = 0; } } jajp1 = rowstr[1]; for (j_imopVarPre78 = 1; j_imopVarPre78 <= nrows; j_imopVarPre78++) { nzrow = 0; for (k_imopVarPre79 = jajp1; k_imopVarPre79 < rowstr[j_imopVarPre78 + 1]; k_imopVarPre79++) { i_imopVarPre76 = colidx[k_imopVarPre79]; x_imopVarPre75[i_imopVarPre76] = x_imopVarPre75[i_imopVarPre76] + a[k_imopVarPre79]; int _imopVarPre208; _imopVarPre208 = mark[i_imopVarPre76] == 0; if (_imopVarPre208) { _imopVarPre208 = x_imopVarPre75[i_imopVarPre76] != 0.0; } if (_imopVarPre208) { mark[i_imopVarPre76] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i_imopVarPre76; } } for (k_imopVarPre79 = 1; k_imopVarPre79 <= nzrow; k_imopVarPre79++) { i_imopVarPre76 = nzloc[k_imopVarPre79]; mark[i_imopVarPre76] = 0; xi = x_imopVarPre75[i_imopVarPre76]; x_imopVarPre75[i_imopVarPre76] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i_imopVarPre76; } } jajp1 = rowstr[j_imopVarPre78 + 1]; rowstr[j_imopVarPre78 + 1] = nza + rowstr[1]; } #pragma omp parallel default(shared) private(i, j, k) { #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j + 1]; k++) { colidx[k] = colidx[k] - firstcol + 1; } } #pragma omp for nowait for (i = 1; i <= 1400 + 1; i++) { x[i] = 1.0; } #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } } zeta = 0.0; for (it = 1; it <= 1; it++) { double sum; double rho; double rho0; double alpha; double beta; double *rnorm_imopVarPre80; int j_imopVarPre81; int k_imopVarPre82; int cgit; int cgitmax = 25; double d; #pragma omp parallel default(shared) private(j_imopVarPre81, sum) shared(rho, naa) { double *_imopVarPre174; #pragma omp master { _imopVarPre174 = &rnorm; rnorm_imopVarPre80 = _imopVarPre174; 
rho = 0.0; } #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= naa + 1; j_imopVarPre81++) { q[j_imopVarPre81] = 0.0; z[j_imopVarPre81] = 0.0; r[j_imopVarPre81] = x[j_imopVarPre81]; p[j_imopVarPre81] = r[j_imopVarPre81]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:rho) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { rho = rho + r[j_imopVarPre81] * r[j_imopVarPre81]; } } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp parallel default(shared) private(j_imopVarPre81, k_imopVarPre82, sum, alpha, beta) shared(d, rho0, rho) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastrow - firstrow + 1; j_imopVarPre81++) { sum = 0.0; for (k_imopVarPre82 = rowstr[j_imopVarPre81]; k_imopVarPre82 < rowstr[j_imopVarPre81 + 1]; k_imopVarPre82++) { sum = sum + a[k_imopVarPre82] * p[colidx[k_imopVarPre82]]; } q[j_imopVarPre81] = sum; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:d) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { d = d + p[j_imopVarPre81] * q[j_imopVarPre81]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier alpha = rho0 / d; #pragma omp for reduction(+:rho) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { z[j_imopVarPre81] = z[j_imopVarPre81] + alpha * p[j_imopVarPre81]; r[j_imopVarPre81] = r[j_imopVarPre81] - alpha * q[j_imopVarPre81]; rho = rho + r[j_imopVarPre81] * r[j_imopVarPre81]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier beta = rho / rho0; #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { p[j_imopVarPre81] = r[j_imopVarPre81] + beta * p[j_imopVarPre81]; } callcount++; } } #pragma omp parallel default(shared) private(j_imopVarPre81, d, j) shared(sum) { #pragma omp master { sum = 0.0; } double _imopVarPre187; #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastrow - firstrow + 1; j_imopVarPre81++) { d = 0.0; for (k_imopVarPre82 = rowstr[j_imopVarPre81]; k_imopVarPre82 <= rowstr[j_imopVarPre81 + 1] - 1; k_imopVarPre82++) { d = d + a[k_imopVarPre82] * z[colidx[k_imopVarPre82]]; } r[j_imopVarPre81] = d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:sum) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { d = x[j_imopVarPre81] - r[j_imopVarPre81]; sum = sum + d * d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre187 = sqrt(sum); (*rnorm_imopVarPre80) = _imopVarPre187; } #pragma omp master { norm_temp11 = 0.0; norm_temp12 = 0.0; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier double _imopVarPre176; #pragma omp for reduction(+:norm_temp11, norm_temp12) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre176 = sqrt(norm_temp12); norm_temp12 = 1.0 / _imopVarPre176; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { x[j] = norm_temp12 * z[j]; } } } #pragma omp parallel default(shared) private(i) { #pragma omp for nowait for (i = 1; i <= 1400 + 1; i++) { x[i] = 1.0; } } 
zeta = 0.0; timer_clear(1); timer_start(1); for (it = 1; it <= 15; it++) { double rho0; double alpha; double beta; double *rnorm_imopVarPre83; int j_imopVarPre84; int k_imopVarPre85; int cgit; int cgitmax = 25; double d; double sum; double rho; #pragma omp parallel default(shared) private(j_imopVarPre84, sum) shared(rho, naa) { double *_imopVarPre178; #pragma omp master { _imopVarPre178 = &rnorm; rnorm_imopVarPre83 = _imopVarPre178; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= naa + 1; j_imopVarPre84++) { q[j_imopVarPre84] = 0.0; z[j_imopVarPre84] = 0.0; r[j_imopVarPre84] = x[j_imopVarPre84]; p[j_imopVarPre84] = r[j_imopVarPre84]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:rho) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { rho = rho + r[j_imopVarPre84] * r[j_imopVarPre84]; } } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp parallel default(shared) private(j_imopVarPre84, k_imopVarPre85, sum, alpha, beta) shared(d, rho0, rho) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastrow - firstrow + 1; j_imopVarPre84++) { sum = 0.0; for (k_imopVarPre85 = rowstr[j_imopVarPre84]; k_imopVarPre85 < rowstr[j_imopVarPre84 + 1]; k_imopVarPre85++) { sum = sum + a[k_imopVarPre85] * p[colidx[k_imopVarPre85]]; } q[j_imopVarPre84] = sum; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:d) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { d = d + p[j_imopVarPre84] * q[j_imopVarPre84]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier alpha = rho0 / d; #pragma omp for reduction(+:rho) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { z[j_imopVarPre84] = z[j_imopVarPre84] + alpha * p[j_imopVarPre84]; r[j_imopVarPre84] = r[j_imopVarPre84] - alpha * q[j_imopVarPre84]; rho = rho + r[j_imopVarPre84] * r[j_imopVarPre84]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier beta = rho / rho0; #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { p[j_imopVarPre84] = r[j_imopVarPre84] + beta * p[j_imopVarPre84]; } callcount++; } } #pragma omp parallel default(shared) private(j_imopVarPre84, d, j) shared(sum) { #pragma omp master { sum = 0.0; } double _imopVarPre187; #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastrow - firstrow + 1; j_imopVarPre84++) { d = 0.0; for (k_imopVarPre85 = rowstr[j_imopVarPre84]; k_imopVarPre85 <= rowstr[j_imopVarPre84 + 1] - 1; k_imopVarPre85++) { d = d + a[k_imopVarPre85] * z[colidx[k_imopVarPre85]]; } r[j_imopVarPre84] = d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:sum) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { d = x[j_imopVarPre84] - r[j_imopVarPre84]; sum = sum + d * d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre187 = sqrt(sum); (*rnorm_imopVarPre83) = _imopVarPre187; } #pragma omp master { norm_temp11 = 0.0; norm_temp12 = 0.0; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier double _imopVarPre180; #pragma omp for reduction(+:norm_temp11, norm_temp12) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } // 
#pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre180 = sqrt(norm_temp12); norm_temp12 = 1.0 / _imopVarPre180; zeta = 10.0 + 1.0 / norm_temp11; if (it == 1) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { x[j] = norm_temp12 * z[j]; } } } #pragma omp parallel { } timer_stop(1); t = timer_read(1); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (class != 'U') { double _imopVarPre183; double _imopVarPre184; _imopVarPre183 = zeta - zeta_verify_value; _imopVarPre184 = fabs(_imopVarPre183); if (_imopVarPre184 <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); double _imopVarPre186; _imopVarPre186 = zeta - zeta_verify_value; printf(" Error is %20.12e\n", _imopVarPre186); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { mflops = (2.0 * 15 * 1400) * (3.0 + (7 * (7 + 1)) + 25.0 * (5.0 + (7 * (7 + 1))) + 3.0) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG", class, 1400, 0, 0, 15, nthreads, t, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randdp"); } static void conj_grad(int colidx[], int rowstr[] , double x[] , double z[] , double a[] , double p[] , double q[] , double r[] , double *rnorm) { int j; int k; int cgit; int cgitmax = 25; double d; double sum; double rho; double rho0; double alpha; double beta; #pragma omp parallel default(shared) private(j, sum) shared(rho, naa) { #pragma omp master { rho = 0.0; } #pragma omp for nowait for (j = 1; j <= naa + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:rho) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { rho = rho + r[j] * r[j]; } } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp parallel default(shared) private(j, k, sum, alpha, beta) shared(d, rho0, rho) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j + 1]; k++) { sum = sum + a[k] * p[colidx[k]]; } q[j] = sum; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:d) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { d = d + p[j] * q[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier alpha = rho0 / d; #pragma omp for reduction(+:rho) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha * p[j]; r[j] = r[j] - alpha * q[j]; rho = rho + r[j] * r[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier beta = rho / rho0; #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { p[j] = r[j] + beta * p[j]; } callcount++; } } #pragma omp parallel default(shared) private(j, d) shared(sum) { #pragma omp master { sum = 0.0; } double _imopVarPre187; #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { d = d + a[k] * z[colidx[k]]; } r[j] = d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma 
omp for reduction(+:sum) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { d = x[j] - r[j]; sum = sum + d * d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre187 = sqrt(sum); (*rnorm) = _imopVarPre187; } } } static void makea(int n, int nz , double a[] , int colidx[] , int rowstr[] , int nonzer , int firstrow , int lastrow , int firstcol , int lastcol , double rcond , int arow[] , int acol[] , double aelt[] , double v[] , int iv[] , double shift) { int i; int nnza; int iouter; int ivelt; int ivelt1; int irow; int nzv; double size; double ratio; double scale; int jcol; size = 1.0; double _imopVarPre189; double _imopVarPre190; _imopVarPre189 = (1.0 / (double) n); _imopVarPre190 = pow(rcond, _imopVarPre189); ratio = _imopVarPre190; nnza = 0; #pragma omp parallel default(shared) private(i) { #pragma omp for nowait for (i = 1; i <= n; i++) { colidx[n + i] = 0; } } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; int *_imopVarPre193; int *_imopVarPre194; _imopVarPre193 = &(colidx[n]); _imopVarPre194 = &(colidx[0]); sprnvc(n, nzv, v, iv, _imopVarPre194, _imopVarPre193); int *_imopVarPre196; _imopVarPre196 = &nzv; vecset(n, v, iv, _imopVarPre196, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; int _imopVarPre198; _imopVarPre198 = jcol >= firstcol; if (_imopVarPre198) { _imopVarPre198 = jcol <= lastcol; } if (_imopVarPre198) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; int _imopVarPre200; _imopVarPre200 = irow >= firstrow; if (_imopVarPre200) { _imopVarPre200 = irow <= lastrow; } if (_imopVarPre200) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } for (i = firstrow; i <= lastrow; i++) { int _imopVarPre202; _imopVarPre202 = i >= firstcol; if (_imopVarPre202) { _imopVarPre202 = i <= lastcol; } if (_imopVarPre202) { iouter = n + i; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i; arow[nnza] = i; aelt[nnza] = rcond - shift; } } int *_imopVarPre205; int *_imopVarPre206; _imopVarPre205 = &(iv[n]); _imopVarPre206 = &(iv[0]); double *x_imopVarPre75; int *mark; int *nzloc; x_imopVarPre75 = v; mark = _imopVarPre206; nzloc = _imopVarPre205; int nrows; int i_imopVarPre76; int j; int jajp1; int nza; int k; int nzrow; double xi; nrows = lastrow - firstrow + 1; #pragma omp parallel default(shared) private(j) { #pragma omp for nowait for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = 0; } } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j++) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } #pragma omp parallel default(shared) private(k, j) { #pragma omp for nowait for (j = 0; j <= nrows - 1; j++) { for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { a[k] = 0.0; } } } for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } for (j = nrows; j >= 1; j--) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; nza = 0; #pragma omp parallel default(shared) private(i_imopVarPre76) { #pragma omp for nowait for (i_imopVarPre76 = 
1; i_imopVarPre76 <= n; i_imopVarPre76++) { x_imopVarPre75[i_imopVarPre76] = 0.0; mark[i_imopVarPre76] = 0; } } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; for (k = jajp1; k < rowstr[j + 1]; k++) { i_imopVarPre76 = colidx[k]; x_imopVarPre75[i_imopVarPre76] = x_imopVarPre75[i_imopVarPre76] + a[k]; int _imopVarPre208; _imopVarPre208 = mark[i_imopVarPre76] == 0; if (_imopVarPre208) { _imopVarPre208 = x_imopVarPre75[i_imopVarPre76] != 0.0; } if (_imopVarPre208) { mark[i_imopVarPre76] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i_imopVarPre76; } } for (k = 1; k <= nzrow; k++) { i_imopVarPre76 = nzloc[k]; mark[i_imopVarPre76] = 0; xi = x_imopVarPre75[i_imopVarPre76]; x_imopVarPre75[i_imopVarPre76] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i_imopVarPre76; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } static void sparse(double a[], int colidx[] , int rowstr[] , int n , int arow[] , int acol[] , double aelt[] , int firstrow , int lastrow , double x[] , boolean mark[] , int nzloc[] , int nnza) { int nrows; int i; int j; int jajp1; int nza; int k; int nzrow; double xi; nrows = lastrow - firstrow + 1; #pragma omp parallel default(shared) private(j) { #pragma omp for nowait for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = 0; } } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j++) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } #pragma omp parallel default(shared) private(k, j) { #pragma omp for nowait for (j = 0; j <= nrows - 1; j++) { for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { a[k] = 0.0; } } } for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } for (j = nrows; j >= 1; j--) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; nza = 0; #pragma omp parallel default(shared) private(i) { #pragma omp for nowait for (i = 1; i <= n; i++) { x[i] = 0.0; mark[i] = 0; } } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; for (k = jajp1; k < rowstr[j + 1]; k++) { i = colidx[k]; x[i] = x[i] + a[k]; int _imopVarPre208; _imopVarPre208 = mark[i] == 0; if (_imopVarPre208) { _imopVarPre208 = x[i] != 0.0; } if (_imopVarPre208) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; } } for (k = 1; k <= nzrow; k++) { i = nzloc[k]; mark[i] = 0; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } static void sprnvc(int n, int nz , double v[] , int iv[] , int nzloc[] , int mark[]) { int nn1; int nzrow; int nzv; int ii; int i; double vecelt; double vecloc; nzv = 0; nzrow = 0; nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); while (nzv < nz) { double *_imopVarPre210; double _imopVarPre211; _imopVarPre210 = &tran; _imopVarPre211 = randlc(_imopVarPre210, amult); vecelt = _imopVarPre211; double *_imopVarPre213; double _imopVarPre214; _imopVarPre213 = &tran; _imopVarPre214 = randlc(_imopVarPre213, amult); vecloc = _imopVarPre214; int _imopVarPre216; _imopVarPre216 = icnvrt(vecloc, nn1); i = _imopVarPre216 + 1; if (i > n) { continue; } if (mark[i] == 0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; nzv = nzv + 1; v[nzv] = vecelt; iv[nzv] = i; } } for (ii = 1; ii <= nzrow; ii++) { i = nzloc[ii]; mark[i] = 0; } } static int icnvrt(double x, int ipwr2) { return ((int) (ipwr2 * x)); } static void vecset(int n, double v[] , int iv[] , int *nzv , int i , double val) 
{ int k; boolean set; set = 0; for (k = 1; k <= *nzv; k++) { if (iv[k] == i) { v[k] = val; set = 1; } } if (set == 0) { *nzv = *nzv + 1; v[*nzv] = val; iv[*nzv] = i; } }
struct __sFILEX; int printf(const char *restrict,...); void exit(int); extern double fabs(double); extern double pow(double, double); extern double sqrt(double); typedef int boolean; extern double randlc(double *, double); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; static int colidx[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static int rowstr[1400 + 1 + 1]; static int iv[2 * 1400 + 1 + 1]; static int arow[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static int acol[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double v[1400 + 1 + 1]; static double aelt[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double a[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double x[1400 + 2 + 1]; static double z[1400 + 2 + 1]; static double p[1400 + 2 + 1]; static double q[1400 + 2 + 1]; static double r[1400 + 2 + 1]; static double amult; static double tran; static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm); static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift); static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza); static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); static int callcount = 0; int main(int argc, char **argv) { int i; int j; int k; int it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t; double mflops; char class; boolean verified; double zeta_verify_value; double epsilon; firstrow = 1; lastrow = 1400; firstcol = 1; lastcol = 1400; int _imopVarPre143; int _imopVarPre144; int _imopVarPre145; _imopVarPre143 = 1400 == 1400; if (_imopVarPre143) { _imopVarPre144 = 7 == 7; if (_imopVarPre144) { _imopVarPre145 = 15 == 15; if (_imopVarPre145) { _imopVarPre145 = 10.0 == 10.0; } _imopVarPre144 = _imopVarPre145; } _imopVarPre143 = _imopVarPre144; } if (_imopVarPre143) { class = 'S'; zeta_verify_value = 8.5971775078648; } else { int _imopVarPre149; int _imopVarPre150; int _imopVarPre151; _imopVarPre149 = 1400 == 7000; if (_imopVarPre149) { _imopVarPre150 = 7 == 8; if (_imopVarPre150) { _imopVarPre151 = 15 == 15; if (_imopVarPre151) { _imopVarPre151 = 10.0 == 12.0; } _imopVarPre150 = _imopVarPre151; } _imopVarPre149 = _imopVarPre150; } if (_imopVarPre149) { class = 'W'; zeta_verify_value = 10.362595087124; } else { int _imopVarPre155; int _imopVarPre156; int _imopVarPre157; _imopVarPre155 = 1400 == 14000; if (_imopVarPre155) { _imopVarPre156 = 7 == 11; if (_imopVarPre156) { _imopVarPre157 = 15 == 15; if (_imopVarPre157) { _imopVarPre157 = 10.0 == 20.0; } _imopVarPre156 = _imopVarPre157; } _imopVarPre155 = _imopVarPre156; } if 
(_imopVarPre155) { class = 'A'; zeta_verify_value = 17.130235054029; } else { int _imopVarPre161; int _imopVarPre162; int _imopVarPre163; _imopVarPre161 = 1400 == 75000; if (_imopVarPre161) { _imopVarPre162 = 7 == 13; if (_imopVarPre162) { _imopVarPre163 = 15 == 75; if (_imopVarPre163) { _imopVarPre163 = 10.0 == 60.0; } _imopVarPre162 = _imopVarPre163; } _imopVarPre161 = _imopVarPre162; } if (_imopVarPre161) { class = 'B'; zeta_verify_value = 22.712745482631; } else { int _imopVarPre167; int _imopVarPre168; int _imopVarPre169; _imopVarPre167 = 1400 == 150000; if (_imopVarPre167) { _imopVarPre168 = 7 == 15; if (_imopVarPre168) { _imopVarPre169 = 15 == 75; if (_imopVarPre169) { _imopVarPre169 = 10.0 == 110.0; } _imopVarPre168 = _imopVarPre169; } _imopVarPre167 = _imopVarPre168; } if (_imopVarPre167) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } } } } } printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", 1400); printf(" Iterations: %5d\n", 15); naa = 1400; nzz = 1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2); tran = 314159265.0; amult = 1220703125.0; double *_imopVarPre171; double _imopVarPre172; _imopVarPre171 = &tran; _imopVarPre172 = randlc(_imopVarPre171, amult); zeta = _imopVarPre172; int n; int nz; int nonzer; double rcond; double shift; n = naa; nz = nzz; nonzer = 7; rcond = 1.0e-1; shift = 10.0; int i_imopVarPre77; int nnza; int iouter; int ivelt; int ivelt1; int irow; int nzv; double size; double ratio; double scale; int jcol; size = 1.0; double _imopVarPre189; double _imopVarPre190; _imopVarPre189 = (1.0 / (double)n); _imopVarPre190 = pow(rcond, _imopVarPre189); ratio = _imopVarPre190; nnza = 0; for (i_imopVarPre77 = 1; i_imopVarPre77 <= n; i_imopVarPre77++) { colidx[n + i_imopVarPre77] = 0; } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; int *_imopVarPre193; int *_imopVarPre194; _imopVarPre193 = &(colidx[n]); _imopVarPre194 = &(colidx[0]); sprnvc(n, nzv, v, iv, _imopVarPre194, _imopVarPre193); int *_imopVarPre196; _imopVarPre196 = &nzv; vecset(n, v, iv, _imopVarPre196, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; int _imopVarPre198; _imopVarPre198 = jcol >= firstcol; if (_imopVarPre198) { _imopVarPre198 = jcol <= lastcol; } if (_imopVarPre198) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; int _imopVarPre200; _imopVarPre200 = irow >= firstrow; if (_imopVarPre200) { _imopVarPre200 = irow <= lastrow; } if (_imopVarPre200) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } for (i_imopVarPre77 = firstrow; i_imopVarPre77 <= lastrow; i_imopVarPre77++) { int _imopVarPre202; _imopVarPre202 = i_imopVarPre77 >= firstcol; if (_imopVarPre202) { _imopVarPre202 = i_imopVarPre77 <= lastcol; } if (_imopVarPre202) { iouter = n + i_imopVarPre77; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i_imopVarPre77; arow[nnza] = i_imopVarPre77; aelt[nnza] = rcond - shift; } } int *_imopVarPre205; int *_imopVarPre206; _imopVarPre205 = &(iv[n]); _imopVarPre206 = &(iv[0]); double *x_imopVarPre75; int *mark; int *nzloc; x_imopVarPre75 = v; mark = _imopVarPre206; nzloc = 
_imopVarPre205; int nrows; int i_imopVarPre76; int j_imopVarPre78; int jajp1; int nza; int k_imopVarPre79; int nzrow; double xi; nrows = lastrow - firstrow + 1; for (j_imopVarPre78 = 1; j_imopVarPre78 <= n; j_imopVarPre78++) { rowstr[j_imopVarPre78] = 0; mark[j_imopVarPre78] = 0; } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j_imopVarPre78 = (arow[nza] - firstrow + 1) + 1; rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + 1; } rowstr[1] = 1; for (j_imopVarPre78 = 2; j_imopVarPre78 <= nrows + 1; j_imopVarPre78++) { rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + rowstr[j_imopVarPre78 - 1]; } for (j_imopVarPre78 = 0; j_imopVarPre78 <= nrows - 1; j_imopVarPre78++) { for (k_imopVarPre79 = rowstr[j_imopVarPre78]; k_imopVarPre79 <= rowstr[j_imopVarPre78 + 1] - 1; k_imopVarPre79++) { a[k_imopVarPre79] = 0.0; } } for (nza = 1; nza <= nnza; nza++) { j_imopVarPre78 = arow[nza] - firstrow + 1; k_imopVarPre79 = rowstr[j_imopVarPre78]; a[k_imopVarPre79] = aelt[nza]; colidx[k_imopVarPre79] = acol[nza]; rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + 1; } for (j_imopVarPre78 = nrows; j_imopVarPre78 >= 1; j_imopVarPre78--) { rowstr[j_imopVarPre78 + 1] = rowstr[j_imopVarPre78]; } rowstr[1] = 1; nza = 0; for (i_imopVarPre76 = 1; i_imopVarPre76 <= n; i_imopVarPre76++) { x_imopVarPre75[i_imopVarPre76] = 0.0; mark[i_imopVarPre76] = 0; } jajp1 = rowstr[1]; for (j_imopVarPre78 = 1; j_imopVarPre78 <= nrows; j_imopVarPre78++) { nzrow = 0; for (k_imopVarPre79 = jajp1; k_imopVarPre79 < rowstr[j_imopVarPre78 + 1]; k_imopVarPre79++) { i_imopVarPre76 = colidx[k_imopVarPre79]; x_imopVarPre75[i_imopVarPre76] = x_imopVarPre75[i_imopVarPre76] + a[k_imopVarPre79]; int _imopVarPre208; _imopVarPre208 = mark[i_imopVarPre76] == 0; if (_imopVarPre208) { _imopVarPre208 = x_imopVarPre75[i_imopVarPre76] != 0.0; } if (_imopVarPre208) { mark[i_imopVarPre76] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i_imopVarPre76; } } for (k_imopVarPre79 = 1; k_imopVarPre79 <= nzrow; k_imopVarPre79++) { i_imopVarPre76 = nzloc[k_imopVarPre79]; mark[i_imopVarPre76] = 0; xi = x_imopVarPre75[i_imopVarPre76]; x_imopVarPre75[i_imopVarPre76] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i_imopVarPre76; } } jajp1 = rowstr[j_imopVarPre78 + 1]; rowstr[j_imopVarPre78 + 1] = nza + rowstr[1]; } for (j = 1; j <= lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j + 1]; k++) { colidx[k] = colidx[k] - firstcol + 1; } } for (i = 1; i <= 1400 + 1; i++) { x[i] = 1.0; } for (j = 1; j <= lastcol - firstcol + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } zeta = 0.0; for (it = 1; it <= 1; it++) { double sum; double rho; double rho0; double alpha; double beta; double *rnorm_imopVarPre80; int j_imopVarPre81; int k_imopVarPre82; int cgit; int cgitmax = 25; double d; double *_imopVarPre174; #pragma omp master { _imopVarPre174 = &rnorm; rnorm_imopVarPre80 = _imopVarPre174; rho = 0.0; } for (j_imopVarPre81 = 1; j_imopVarPre81 <= naa + 1; j_imopVarPre81++) { q[j_imopVarPre81] = 0.0; z[j_imopVarPre81] = 0.0; r[j_imopVarPre81] = x[j_imopVarPre81]; p[j_imopVarPre81] = r[j_imopVarPre81]; } // for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { rho = rho + r[j_imopVarPre81] * r[j_imopVarPre81]; } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastrow - firstrow + 1; j_imopVarPre81++) { sum = 0.0; for (k_imopVarPre82 = rowstr[j_imopVarPre81]; k_imopVarPre82 < rowstr[j_imopVarPre81 + 1]; 
k_imopVarPre82++) { sum = sum + a[k_imopVarPre82] * p[colidx[k_imopVarPre82]]; } q[j_imopVarPre81] = sum; } // for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { d = d + p[j_imopVarPre81] * q[j_imopVarPre81]; } // alpha = rho0 / d; for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { z[j_imopVarPre81] = z[j_imopVarPre81] + alpha * p[j_imopVarPre81]; r[j_imopVarPre81] = r[j_imopVarPre81] - alpha * q[j_imopVarPre81]; rho = rho + r[j_imopVarPre81] * r[j_imopVarPre81]; } // beta = rho / rho0; for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { p[j_imopVarPre81] = r[j_imopVarPre81] + beta * p[j_imopVarPre81]; } callcount++; } #pragma omp master { sum = 0.0; } double _imopVarPre187; for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastrow - firstrow + 1; j_imopVarPre81++) { d = 0.0; for (k_imopVarPre82 = rowstr[j_imopVarPre81]; k_imopVarPre82 <= rowstr[j_imopVarPre81 + 1] - 1; k_imopVarPre82++) { d = d + a[k_imopVarPre82] * z[colidx[k_imopVarPre82]]; } r[j_imopVarPre81] = d; } // for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { d = x[j_imopVarPre81] - r[j_imopVarPre81]; sum = sum + d * d; } // _imopVarPre187 = sqrt(sum); (*rnorm_imopVarPre80) = _imopVarPre187; norm_temp11 = 0.0; norm_temp12 = 0.0; // double _imopVarPre176; for (j = 1; j <= lastcol - firstcol + 1; j++) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } // _imopVarPre176 = sqrt(norm_temp12); norm_temp12 = 1.0 / _imopVarPre176; // for (j = 1; j <= lastcol - firstcol + 1; j++) { x[j] = norm_temp12 * z[j]; } } for (i = 1; i <= 1400 + 1; i++) { x[i] = 1.0; } zeta = 0.0; timer_clear(1); timer_start(1); for (it = 1; it <= 15; it++) { double rho0; double alpha; double beta; double *rnorm_imopVarPre83; int j_imopVarPre84; int k_imopVarPre85; int cgit; int cgitmax = 25; double d; double sum; double rho; double *_imopVarPre178; #pragma omp master { _imopVarPre178 = &rnorm; rnorm_imopVarPre83 = _imopVarPre178; rho = 0.0; } for (j_imopVarPre84 = 1; j_imopVarPre84 <= naa + 1; j_imopVarPre84++) { q[j_imopVarPre84] = 0.0; z[j_imopVarPre84] = 0.0; r[j_imopVarPre84] = x[j_imopVarPre84]; p[j_imopVarPre84] = r[j_imopVarPre84]; } // for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { rho = rho + r[j_imopVarPre84] * r[j_imopVarPre84]; } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastrow - firstrow + 1; j_imopVarPre84++) { sum = 0.0; for (k_imopVarPre85 = rowstr[j_imopVarPre84]; k_imopVarPre85 < rowstr[j_imopVarPre84 + 1]; k_imopVarPre85++) { sum = sum + a[k_imopVarPre85] * p[colidx[k_imopVarPre85]]; } q[j_imopVarPre84] = sum; } // for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { d = d + p[j_imopVarPre84] * q[j_imopVarPre84]; } // alpha = rho0 / d; for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { z[j_imopVarPre84] = z[j_imopVarPre84] + alpha * p[j_imopVarPre84]; r[j_imopVarPre84] = r[j_imopVarPre84] - alpha * q[j_imopVarPre84]; rho = rho + r[j_imopVarPre84] * r[j_imopVarPre84]; } // beta = rho / rho0; for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { p[j_imopVarPre84] = r[j_imopVarPre84] + beta * p[j_imopVarPre84]; } callcount++; } #pragma omp master { sum = 0.0; } double _imopVarPre187; for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastrow - 
firstrow + 1; j_imopVarPre84++) { d = 0.0; for (k_imopVarPre85 = rowstr[j_imopVarPre84]; k_imopVarPre85 <= rowstr[j_imopVarPre84 + 1] - 1; k_imopVarPre85++) { d = d + a[k_imopVarPre85] * z[colidx[k_imopVarPre85]]; } r[j_imopVarPre84] = d; } // for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { d = x[j_imopVarPre84] - r[j_imopVarPre84]; sum = sum + d * d; } // _imopVarPre187 = sqrt(sum); (*rnorm_imopVarPre83) = _imopVarPre187; norm_temp11 = 0.0; norm_temp12 = 0.0; // double _imopVarPre180; for (j = 1; j <= lastcol - firstcol + 1; j++) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } // _imopVarPre180 = sqrt(norm_temp12); norm_temp12 = 1.0 / _imopVarPre180; zeta = 10.0 + 1.0 / norm_temp11; if (it == 1) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); // for (j = 1; j <= lastcol - firstcol + 1; j++) { x[j] = norm_temp12 * z[j]; } } timer_stop(1); t = timer_read(1); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (class != 'U') { double _imopVarPre183; double _imopVarPre184; _imopVarPre183 = zeta - zeta_verify_value; _imopVarPre184 = fabs(_imopVarPre183); if (_imopVarPre184 <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); double _imopVarPre186; _imopVarPre186 = zeta - zeta_verify_value; printf(" Error is %20.12e\n", _imopVarPre186); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { mflops = (2.0 * 15 * 1400) * (3.0 + (7 * (7 + 1)) + 25.0 * (5.0 + (7 * (7 + 1))) + 3.0) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG", class, 1400, 0, 0, 15, nthreads, t, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randdp"); } static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm) { int j; int k; int cgit; int cgitmax = 25; double d; double sum; double rho; double rho0; double alpha; double beta; #pragma omp master { rho = 0.0; } for (j = 1; j <= naa + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; } // for (j = 1; j <= lastcol - firstcol + 1; j++) { rho = rho + r[j] * r[j]; } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } for (j = 1; j <= lastrow - firstrow + 1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j + 1]; k++) { sum = sum + a[k] * p[colidx[k]]; } q[j] = sum; } // for (j = 1; j <= lastcol - firstcol + 1; j++) { d = d + p[j] * q[j]; } // alpha = rho0 / d; for (j = 1; j <= lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha * p[j]; r[j] = r[j] - alpha * q[j]; rho = rho + r[j] * r[j]; } // beta = rho / rho0; for (j = 1; j <= lastcol - firstcol + 1; j++) { p[j] = r[j] + beta * p[j]; } callcount++; } #pragma omp master { sum = 0.0; } double _imopVarPre187; for (j = 1; j <= lastrow - firstrow + 1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { d = d + a[k] * z[colidx[k]]; } r[j] = d; } // for (j = 1; j <= lastcol - firstcol + 1; j++) { d = x[j] - r[j]; sum = sum + d * d; } // _imopVarPre187 = sqrt(sum); (*rnorm) = _imopVarPre187; } static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int 
firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift) { int i; int nnza; int iouter; int ivelt; int ivelt1; int irow; int nzv; double size; double ratio; double scale; int jcol; size = 1.0; double _imopVarPre189; double _imopVarPre190; _imopVarPre189 = (1.0 / (double)n); _imopVarPre190 = pow(rcond, _imopVarPre189); ratio = _imopVarPre190; nnza = 0; for (i = 1; i <= n; i++) { colidx[n + i] = 0; } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; int *_imopVarPre193; int *_imopVarPre194; _imopVarPre193 = &(colidx[n]); _imopVarPre194 = &(colidx[0]); sprnvc(n, nzv, v, iv, _imopVarPre194, _imopVarPre193); int *_imopVarPre196; _imopVarPre196 = &nzv; vecset(n, v, iv, _imopVarPre196, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; int _imopVarPre198; _imopVarPre198 = jcol >= firstcol; if (_imopVarPre198) { _imopVarPre198 = jcol <= lastcol; } if (_imopVarPre198) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; int _imopVarPre200; _imopVarPre200 = irow >= firstrow; if (_imopVarPre200) { _imopVarPre200 = irow <= lastrow; } if (_imopVarPre200) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } for (i = firstrow; i <= lastrow; i++) { int _imopVarPre202; _imopVarPre202 = i >= firstcol; if (_imopVarPre202) { _imopVarPre202 = i <= lastcol; } if (_imopVarPre202) { iouter = n + i; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i; arow[nnza] = i; aelt[nnza] = rcond - shift; } } int *_imopVarPre205; int *_imopVarPre206; _imopVarPre205 = &(iv[n]); _imopVarPre206 = &(iv[0]); double *x_imopVarPre75; int *mark; int *nzloc; x_imopVarPre75 = v; mark = _imopVarPre206; nzloc = _imopVarPre205; int nrows; int i_imopVarPre76; int j; int jajp1; int nza; int k; int nzrow; double xi; nrows = lastrow - firstrow + 1; for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = 0; } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j++) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } for (j = 0; j <= nrows - 1; j++) { for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { a[k] = 0.0; } } for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } for (j = nrows; j >= 1; j--) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; nza = 0; for (i_imopVarPre76 = 1; i_imopVarPre76 <= n; i_imopVarPre76++) { x_imopVarPre75[i_imopVarPre76] = 0.0; mark[i_imopVarPre76] = 0; } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; for (k = jajp1; k < rowstr[j + 1]; k++) { i_imopVarPre76 = colidx[k]; x_imopVarPre75[i_imopVarPre76] = x_imopVarPre75[i_imopVarPre76] + a[k]; int _imopVarPre208; _imopVarPre208 = mark[i_imopVarPre76] == 0; if (_imopVarPre208) { _imopVarPre208 = x_imopVarPre75[i_imopVarPre76] != 0.0; } if (_imopVarPre208) { mark[i_imopVarPre76] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i_imopVarPre76; } } for (k = 1; k <= nzrow; k++) { i_imopVarPre76 = nzloc[k]; mark[i_imopVarPre76] = 0; xi = x_imopVarPre75[i_imopVarPre76]; 
x_imopVarPre75[i_imopVarPre76] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i_imopVarPre76; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza) { int nrows; int i; int j; int jajp1; int nza; int k; int nzrow; double xi; nrows = lastrow - firstrow + 1; for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = 0; } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j++) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } for (j = 0; j <= nrows - 1; j++) { for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { a[k] = 0.0; } } for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } for (j = nrows; j >= 1; j--) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; nza = 0; for (i = 1; i <= n; i++) { x[i] = 0.0; mark[i] = 0; } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; for (k = jajp1; k < rowstr[j + 1]; k++) { i = colidx[k]; x[i] = x[i] + a[k]; int _imopVarPre208; _imopVarPre208 = mark[i] == 0; if (_imopVarPre208) { _imopVarPre208 = x[i] != 0.0; } if (_imopVarPre208) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; } } for (k = 1; k <= nzrow; k++) { i = nzloc[k]; mark[i] = 0; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]) { int nn1; int nzrow; int nzv; int ii; int i; double vecelt; double vecloc; nzv = 0; nzrow = 0; nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); while (nzv < nz) { double *_imopVarPre210; double _imopVarPre211; _imopVarPre210 = &tran; _imopVarPre211 = randlc(_imopVarPre210, amult); vecelt = _imopVarPre211; double *_imopVarPre213; double _imopVarPre214; _imopVarPre213 = &tran; _imopVarPre214 = randlc(_imopVarPre213, amult); vecloc = _imopVarPre214; int _imopVarPre216; _imopVarPre216 = icnvrt(vecloc, nn1); i = _imopVarPre216 + 1; if (i > n) { continue; } if (mark[i] == 0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; nzv = nzv + 1; v[nzv] = vecelt; iv[nzv] = i; } } for (ii = 1; ii <= nzrow; ii++) { i = nzloc[ii]; mark[i] = 0; } } static int icnvrt(double x, int ipwr2) { return ((int)(ipwr2 * x)); } static void vecset(int n, double v[], int iv[], int *nzv, int i, double val) { int k; boolean set; set = 0; for (k = 1; k <= *nzv; k++) { if (iv[k] == i) { v[k] = val; set = 1; } } if (set == 0) { *nzv = *nzv + 1; v[*nzv] = val; iv[*nzv] = i; } }
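The serial CG variants above all revolve around the same compressed-sparse-row (CSR) matrix-vector product q = A*p. A minimal standalone sketch of that kernel, using hypothetical names (csr_spmv is not part of the benchmark), with the benchmark's 1-based row convention:

#include <stddef.h>

/* CSR sparse matrix-vector product q = A*p, as in the CG loop above.
 * rowstr[j] .. rowstr[j+1]-1 index the nonzeros of row j, whose column
 * ids live in colidx[]. Rows are 1-based, matching the benchmark. */
static void csr_spmv(int nrows, const int rowstr[], const int colidx[],
                     const double a[], const double p[], double q[])
{
    for (int j = 1; j <= nrows; j++) {
        double sum = 0.0;
        for (int k = rowstr[j]; k < rowstr[j + 1]; k++)
            sum += a[k] * p[colidx[k]];   /* gather through colidx */
        q[j] = sum;
    }
}

The gather through colidx[] makes the inner loop memory-bound; the OpenMP variants that follow parallelize over rows precisely because each q[j] is independent.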
struct __sFILEX; int printf(const char *restrict,...); void exit(int); extern double fabs(double); extern double pow(double, double); extern double sqrt(double); typedef int boolean; extern double randlc(double *, double); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; static int colidx[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static int rowstr[1400 + 1 + 1]; static int iv[2 * 1400 + 1 + 1]; static int arow[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static int acol[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double v[1400 + 1 + 1]; static double aelt[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double a[1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2) + 1]; static double x[1400 + 2 + 1]; static double z[1400 + 2 + 1]; static double p[1400 + 2 + 1]; static double q[1400 + 2 + 1]; static double r[1400 + 2 + 1]; static double amult; static double tran; static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm); static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift); static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza); static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]); static int icnvrt(double x, int ipwr2); static void vecset(int n, double v[], int iv[], int *nzv, int i, double val); static int callcount = 0; int main(int argc, char **argv) { int i; int j; int k; int it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t; double mflops; char class; boolean verified; double zeta_verify_value; double epsilon; firstrow = 1; lastrow = 1400; firstcol = 1; lastcol = 1400; int _imopVarPre143; int _imopVarPre144; int _imopVarPre145; _imopVarPre143 = 1400 == 1400; if (_imopVarPre143) { _imopVarPre144 = 7 == 7; if (_imopVarPre144) { _imopVarPre145 = 15 == 15; if (_imopVarPre145) { _imopVarPre145 = 10.0 == 10.0; } _imopVarPre144 = _imopVarPre145; } _imopVarPre143 = _imopVarPre144; } if (_imopVarPre143) { class = 'S'; zeta_verify_value = 8.5971775078648; } else { int _imopVarPre149; int _imopVarPre150; int _imopVarPre151; _imopVarPre149 = 1400 == 7000; if (_imopVarPre149) { _imopVarPre150 = 7 == 8; if (_imopVarPre150) { _imopVarPre151 = 15 == 15; if (_imopVarPre151) { _imopVarPre151 = 10.0 == 12.0; } _imopVarPre150 = _imopVarPre151; } _imopVarPre149 = _imopVarPre150; } if (_imopVarPre149) { class = 'W'; zeta_verify_value = 10.362595087124; } else { int _imopVarPre155; int _imopVarPre156; int _imopVarPre157; _imopVarPre155 = 1400 == 14000; if (_imopVarPre155) { _imopVarPre156 = 7 == 11; if (_imopVarPre156) { _imopVarPre157 = 15 == 15; if (_imopVarPre157) { _imopVarPre157 = 10.0 == 20.0; } _imopVarPre156 = _imopVarPre157; } _imopVarPre155 = _imopVarPre156; } if 
(_imopVarPre155) { class = 'A'; zeta_verify_value = 17.130235054029; } else { int _imopVarPre161; int _imopVarPre162; int _imopVarPre163; _imopVarPre161 = 1400 == 75000; if (_imopVarPre161) { _imopVarPre162 = 7 == 13; if (_imopVarPre162) { _imopVarPre163 = 15 == 75; if (_imopVarPre163) { _imopVarPre163 = 10.0 == 60.0; } _imopVarPre162 = _imopVarPre163; } _imopVarPre161 = _imopVarPre162; } if (_imopVarPre161) { class = 'B'; zeta_verify_value = 22.712745482631; } else { int _imopVarPre167; int _imopVarPre168; int _imopVarPre169; _imopVarPre167 = 1400 == 150000; if (_imopVarPre167) { _imopVarPre168 = 7 == 15; if (_imopVarPre168) { _imopVarPre169 = 15 == 75; if (_imopVarPre169) { _imopVarPre169 = 10.0 == 110.0; } _imopVarPre168 = _imopVarPre169; } _imopVarPre167 = _imopVarPre168; } if (_imopVarPre167) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } } } } } printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - CG Benchmark\n"); printf(" Size: %10d\n", 1400); printf(" Iterations: %5d\n", 15); naa = 1400; nzz = 1400 * (7 + 1) * (7 + 1) + 1400 * (7 + 2); tran = 314159265.0; amult = 1220703125.0; double *_imopVarPre171; double _imopVarPre172; _imopVarPre171 = &tran; _imopVarPre172 = randlc(_imopVarPre171, amult); zeta = _imopVarPre172; int n; int nz; int nonzer; double rcond; double shift; n = naa; nz = nzz; nonzer = 7; rcond = 1.0e-1; shift = 10.0; int i_imopVarPre77; int nnza; int iouter; int ivelt; int ivelt1; int irow; int nzv; double size; double ratio; double scale; int jcol; size = 1.0; double _imopVarPre189; double _imopVarPre190; _imopVarPre189 = (1.0 / (double)n); _imopVarPre190 = pow(rcond, _imopVarPre189); ratio = _imopVarPre190; nnza = 0; #pragma omp parallel default(shared) private(i_imopVarPre77) { #pragma omp for nowait for (i_imopVarPre77 = 1; i_imopVarPre77 <= n; i_imopVarPre77++) { colidx[n + i_imopVarPre77] = 0; } } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; int *_imopVarPre193; int *_imopVarPre194; _imopVarPre193 = &(colidx[n]); _imopVarPre194 = &(colidx[0]); sprnvc(n, nzv, v, iv, _imopVarPre194, _imopVarPre193); int *_imopVarPre196; _imopVarPre196 = &nzv; vecset(n, v, iv, _imopVarPre196, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; int _imopVarPre198; _imopVarPre198 = jcol >= firstcol; if (_imopVarPre198) { _imopVarPre198 = jcol <= lastcol; } if (_imopVarPre198) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; int _imopVarPre200; _imopVarPre200 = irow >= firstrow; if (_imopVarPre200) { _imopVarPre200 = irow <= lastrow; } if (_imopVarPre200) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } for (i_imopVarPre77 = firstrow; i_imopVarPre77 <= lastrow; i_imopVarPre77++) { int _imopVarPre202; _imopVarPre202 = i_imopVarPre77 >= firstcol; if (_imopVarPre202) { _imopVarPre202 = i_imopVarPre77 <= lastcol; } if (_imopVarPre202) { iouter = n + i_imopVarPre77; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i_imopVarPre77; arow[nnza] = i_imopVarPre77; aelt[nnza] = rcond - shift; } } int *_imopVarPre205; int *_imopVarPre206; _imopVarPre205 = &(iv[n]); _imopVarPre206 = &(iv[0]); double 
*x_imopVarPre75; int *mark; int *nzloc; x_imopVarPre75 = v; mark = _imopVarPre206; nzloc = _imopVarPre205; int nrows; int i_imopVarPre76; int j_imopVarPre78; int jajp1; int nza; int k_imopVarPre79; int nzrow; double xi; nrows = lastrow - firstrow + 1; #pragma omp parallel default(shared) private(j_imopVarPre78) { #pragma omp for nowait for (j_imopVarPre78 = 1; j_imopVarPre78 <= n; j_imopVarPre78++) { rowstr[j_imopVarPre78] = 0; mark[j_imopVarPre78] = 0; } } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j_imopVarPre78 = (arow[nza] - firstrow + 1) + 1; rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + 1; } rowstr[1] = 1; for (j_imopVarPre78 = 2; j_imopVarPre78 <= nrows + 1; j_imopVarPre78++) { rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + rowstr[j_imopVarPre78 - 1]; } #pragma omp parallel default(shared) private(k_imopVarPre79, j_imopVarPre78) { #pragma omp for nowait for (j_imopVarPre78 = 0; j_imopVarPre78 <= nrows - 1; j_imopVarPre78++) { for (k_imopVarPre79 = rowstr[j_imopVarPre78]; k_imopVarPre79 <= rowstr[j_imopVarPre78 + 1] - 1; k_imopVarPre79++) { a[k_imopVarPre79] = 0.0; } } } for (nza = 1; nza <= nnza; nza++) { j_imopVarPre78 = arow[nza] - firstrow + 1; k_imopVarPre79 = rowstr[j_imopVarPre78]; a[k_imopVarPre79] = aelt[nza]; colidx[k_imopVarPre79] = acol[nza]; rowstr[j_imopVarPre78] = rowstr[j_imopVarPre78] + 1; } for (j_imopVarPre78 = nrows; j_imopVarPre78 >= 1; j_imopVarPre78--) { rowstr[j_imopVarPre78 + 1] = rowstr[j_imopVarPre78]; } rowstr[1] = 1; nza = 0; #pragma omp parallel default(shared) private(i_imopVarPre76) { #pragma omp for nowait for (i_imopVarPre76 = 1; i_imopVarPre76 <= n; i_imopVarPre76++) { x_imopVarPre75[i_imopVarPre76] = 0.0; mark[i_imopVarPre76] = 0; } } jajp1 = rowstr[1]; for (j_imopVarPre78 = 1; j_imopVarPre78 <= nrows; j_imopVarPre78++) { nzrow = 0; for (k_imopVarPre79 = jajp1; k_imopVarPre79 < rowstr[j_imopVarPre78 + 1]; k_imopVarPre79++) { i_imopVarPre76 = colidx[k_imopVarPre79]; x_imopVarPre75[i_imopVarPre76] = x_imopVarPre75[i_imopVarPre76] + a[k_imopVarPre79]; int _imopVarPre208; _imopVarPre208 = mark[i_imopVarPre76] == 0; if (_imopVarPre208) { _imopVarPre208 = x_imopVarPre75[i_imopVarPre76] != 0.0; } if (_imopVarPre208) { mark[i_imopVarPre76] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i_imopVarPre76; } } for (k_imopVarPre79 = 1; k_imopVarPre79 <= nzrow; k_imopVarPre79++) { i_imopVarPre76 = nzloc[k_imopVarPre79]; mark[i_imopVarPre76] = 0; xi = x_imopVarPre75[i_imopVarPre76]; x_imopVarPre75[i_imopVarPre76] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i_imopVarPre76; } } jajp1 = rowstr[j_imopVarPre78 + 1]; rowstr[j_imopVarPre78 + 1] = nza + rowstr[1]; } #pragma omp parallel default(shared) private(i, j, k) { #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { for (k = rowstr[j]; k < rowstr[j + 1]; k++) { colidx[k] = colidx[k] - firstcol + 1; } } #pragma omp for nowait for (i = 1; i <= 1400 + 1; i++) { x[i] = 1.0; } #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } } zeta = 0.0; for (it = 1; it <= 1; it++) { double sum; double rho; double rho0; double alpha; double beta; double *rnorm_imopVarPre80; int j_imopVarPre81; int k_imopVarPre82; int cgit; int cgitmax = 25; double d; #pragma omp parallel default(shared) private(j_imopVarPre81, sum) shared(rho, naa) { double *_imopVarPre174; #pragma omp master { _imopVarPre174 = &rnorm; rnorm_imopVarPre80 = _imopVarPre174; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 
<= naa + 1; j_imopVarPre81++) { q[j_imopVarPre81] = 0.0; z[j_imopVarPre81] = 0.0; r[j_imopVarPre81] = x[j_imopVarPre81]; p[j_imopVarPre81] = r[j_imopVarPre81]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:rho) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { rho = rho + r[j_imopVarPre81] * r[j_imopVarPre81]; } } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp parallel default(shared) private(j_imopVarPre81, k_imopVarPre82, sum, alpha, beta) shared(d, rho0, rho) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastrow - firstrow + 1; j_imopVarPre81++) { sum = 0.0; for (k_imopVarPre82 = rowstr[j_imopVarPre81]; k_imopVarPre82 < rowstr[j_imopVarPre81 + 1]; k_imopVarPre82++) { sum = sum + a[k_imopVarPre82] * p[colidx[k_imopVarPre82]]; } q[j_imopVarPre81] = sum; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:d) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { d = d + p[j_imopVarPre81] * q[j_imopVarPre81]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier alpha = rho0 / d; #pragma omp for reduction(+:rho) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { z[j_imopVarPre81] = z[j_imopVarPre81] + alpha * p[j_imopVarPre81]; r[j_imopVarPre81] = r[j_imopVarPre81] - alpha * q[j_imopVarPre81]; rho = rho + r[j_imopVarPre81] * r[j_imopVarPre81]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier beta = rho / rho0; #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { p[j_imopVarPre81] = r[j_imopVarPre81] + beta * p[j_imopVarPre81]; } callcount++; } } #pragma omp parallel default(shared) private(j_imopVarPre81, d, j) shared(sum) { #pragma omp master { sum = 0.0; } double _imopVarPre187; #pragma omp for nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastrow - firstrow + 1; j_imopVarPre81++) { d = 0.0; for (k_imopVarPre82 = rowstr[j_imopVarPre81]; k_imopVarPre82 <= rowstr[j_imopVarPre81 + 1] - 1; k_imopVarPre82++) { d = d + a[k_imopVarPre82] * z[colidx[k_imopVarPre82]]; } r[j_imopVarPre81] = d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:sum) nowait for (j_imopVarPre81 = 1; j_imopVarPre81 <= lastcol - firstcol + 1; j_imopVarPre81++) { d = x[j_imopVarPre81] - r[j_imopVarPre81]; sum = sum + d * d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre187 = sqrt(sum); (*rnorm_imopVarPre80) = _imopVarPre187; } #pragma omp master { norm_temp11 = 0.0; norm_temp12 = 0.0; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier double _imopVarPre176; #pragma omp for reduction(+:norm_temp11, norm_temp12) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre176 = sqrt(norm_temp12); norm_temp12 = 1.0 / _imopVarPre176; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { x[j] = norm_temp12 * z[j]; } } } #pragma omp parallel default(shared) private(i) { #pragma omp for nowait for (i = 1; i <= 1400 + 1; i++) { x[i] = 1.0; } } zeta = 0.0; timer_clear(1); timer_start(1); for (it = 1; it <= 15; it++) { 
double rho0; double alpha; double beta; double *rnorm_imopVarPre83; int j_imopVarPre84; int k_imopVarPre85; int cgit; int cgitmax = 25; double d; double sum; double rho; #pragma omp parallel default(shared) private(j_imopVarPre84, sum) shared(rho, naa) { double *_imopVarPre178; #pragma omp master { _imopVarPre178 = &rnorm; rnorm_imopVarPre83 = _imopVarPre178; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= naa + 1; j_imopVarPre84++) { q[j_imopVarPre84] = 0.0; z[j_imopVarPre84] = 0.0; r[j_imopVarPre84] = x[j_imopVarPre84]; p[j_imopVarPre84] = r[j_imopVarPre84]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:rho) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { rho = rho + r[j_imopVarPre84] * r[j_imopVarPre84]; } } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp parallel default(shared) private(j_imopVarPre84, k_imopVarPre85, sum, alpha, beta) shared(d, rho0, rho) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastrow - firstrow + 1; j_imopVarPre84++) { sum = 0.0; for (k_imopVarPre85 = rowstr[j_imopVarPre84]; k_imopVarPre85 < rowstr[j_imopVarPre84 + 1]; k_imopVarPre85++) { sum = sum + a[k_imopVarPre85] * p[colidx[k_imopVarPre85]]; } q[j_imopVarPre84] = sum; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:d) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { d = d + p[j_imopVarPre84] * q[j_imopVarPre84]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier alpha = rho0 / d; #pragma omp for reduction(+:rho) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { z[j_imopVarPre84] = z[j_imopVarPre84] + alpha * p[j_imopVarPre84]; r[j_imopVarPre84] = r[j_imopVarPre84] - alpha * q[j_imopVarPre84]; rho = rho + r[j_imopVarPre84] * r[j_imopVarPre84]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier beta = rho / rho0; #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { p[j_imopVarPre84] = r[j_imopVarPre84] + beta * p[j_imopVarPre84]; } callcount++; } } #pragma omp parallel default(shared) private(j_imopVarPre84, d, j) shared(sum) { #pragma omp master { sum = 0.0; } double _imopVarPre187; #pragma omp for nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastrow - firstrow + 1; j_imopVarPre84++) { d = 0.0; for (k_imopVarPre85 = rowstr[j_imopVarPre84]; k_imopVarPre85 <= rowstr[j_imopVarPre84 + 1] - 1; k_imopVarPre85++) { d = d + a[k_imopVarPre85] * z[colidx[k_imopVarPre85]]; } r[j_imopVarPre84] = d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:sum) nowait for (j_imopVarPre84 = 1; j_imopVarPre84 <= lastcol - firstcol + 1; j_imopVarPre84++) { d = x[j_imopVarPre84] - r[j_imopVarPre84]; sum = sum + d * d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre187 = sqrt(sum); (*rnorm_imopVarPre83) = _imopVarPre187; } #pragma omp master { norm_temp11 = 0.0; norm_temp12 = 0.0; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier double _imopVarPre180; #pragma omp for reduction(+:norm_temp11, norm_temp12) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { 
_imopVarPre180 = sqrt(norm_temp12); norm_temp12 = 1.0 / _imopVarPre180; zeta = 10.0 + 1.0 / norm_temp11; if (it == 1) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta); } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { x[j] = norm_temp12 * z[j]; } } } #pragma omp parallel { } timer_stop(1); t = timer_read(1); printf(" Benchmark completed\n"); epsilon = 1.0e-10; if (class != 'U') { double _imopVarPre183; double _imopVarPre184; _imopVarPre183 = zeta - zeta_verify_value; _imopVarPre184 = fabs(_imopVarPre183); if (_imopVarPre184 <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n", zeta); double _imopVarPre186; _imopVarPre186 = zeta - zeta_verify_value; printf(" Error is %20.12e\n", _imopVarPre186); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n", zeta); printf(" The correct zeta is %20.12e\n", zeta_verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { mflops = (2.0 * 15 * 1400) * (3.0 + (7 * (7 + 1)) + 25.0 * (5.0 + (7 * (7 + 1))) + 3.0) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG", class, 1400, 0, 0, 15, nthreads, t, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randdp"); } static void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm) { int j; int k; int cgit; int cgitmax = 25; double d; double sum; double rho; double rho0; double alpha; double beta; #pragma omp parallel default(shared) private(j, sum) shared(rho, naa) { #pragma omp master { rho = 0.0; } #pragma omp for nowait for (j = 1; j <= naa + 1; j++) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:rho) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { rho = rho + r[j] * r[j]; } } for (cgit = 1; cgit <= cgitmax; cgit++) { #pragma omp parallel default(shared) private(j, k, sum, alpha, beta) shared(d, rho0, rho) { #pragma omp master { rho0 = rho; d = 0.0; rho = 0.0; } #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { sum = 0.0; for (k = rowstr[j]; k < rowstr[j + 1]; k++) { sum = sum + a[k] * p[colidx[k]]; } q[j] = sum; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:d) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { d = d + p[j] * q[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier alpha = rho0 / d; #pragma omp for reduction(+:rho) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { z[j] = z[j] + alpha * p[j]; r[j] = r[j] - alpha * q[j]; rho = rho + r[j] * r[j]; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier beta = rho / rho0; #pragma omp for nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { p[j] = r[j] + beta * p[j]; } callcount++; } } #pragma omp parallel default(shared) private(j, d) shared(sum) { #pragma omp master { sum = 0.0; } double _imopVarPre187; #pragma omp for nowait for (j = 1; j <= lastrow - firstrow + 1; j++) { d = 0.0; for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { d = d + a[k] * z[colidx[k]]; } r[j] = d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp for reduction(+:sum) nowait for (j = 1; j <= lastcol - firstcol + 1; j++) { d = 
x[j] - r[j]; sum = sum + d * d; } // #pragma omp dummyFlush BARRIER_START #pragma omp barrier #pragma omp master { _imopVarPre187 = sqrt(sum); (*rnorm) = _imopVarPre187; } } } static void makea(int n, int nz, double a[], int colidx[], int rowstr[], int nonzer, int firstrow, int lastrow, int firstcol, int lastcol, double rcond, int arow[], int acol[], double aelt[], double v[], int iv[], double shift) { int i; int nnza; int iouter; int ivelt; int ivelt1; int irow; int nzv; double size; double ratio; double scale; int jcol; size = 1.0; double _imopVarPre189; double _imopVarPre190; _imopVarPre189 = (1.0 / (double)n); _imopVarPre190 = pow(rcond, _imopVarPre189); ratio = _imopVarPre190; nnza = 0; #pragma omp parallel default(shared) private(i) { #pragma omp for nowait for (i = 1; i <= n; i++) { colidx[n + i] = 0; } } for (iouter = 1; iouter <= n; iouter++) { nzv = nonzer; int *_imopVarPre193; int *_imopVarPre194; _imopVarPre193 = &(colidx[n]); _imopVarPre194 = &(colidx[0]); sprnvc(n, nzv, v, iv, _imopVarPre194, _imopVarPre193); int *_imopVarPre196; _imopVarPre196 = &nzv; vecset(n, v, iv, _imopVarPre196, iouter, 0.5); for (ivelt = 1; ivelt <= nzv; ivelt++) { jcol = iv[ivelt]; int _imopVarPre198; _imopVarPre198 = jcol >= firstcol; if (_imopVarPre198) { _imopVarPre198 = jcol <= lastcol; } if (_imopVarPre198) { scale = size * v[ivelt]; for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) { irow = iv[ivelt1]; int _imopVarPre200; _imopVarPre200 = irow >= firstrow; if (_imopVarPre200) { _imopVarPre200 = irow <= lastrow; } if (_imopVarPre200) { nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in" " makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = jcol; arow[nnza] = irow; aelt[nnza] = v[ivelt1] * scale; } } } } size = size * ratio; } for (i = firstrow; i <= lastrow; i++) { int _imopVarPre202; _imopVarPre202 = i >= firstcol; if (_imopVarPre202) { _imopVarPre202 = i <= lastcol; } if (_imopVarPre202) { iouter = n + i; nnza = nnza + 1; if (nnza > nz) { printf("Space for matrix elements exceeded in makea\n"); printf("nnza, nzmax = %d, %d\n", nnza, nz); printf("iouter = %d\n", iouter); exit(1); } acol[nnza] = i; arow[nnza] = i; aelt[nnza] = rcond - shift; } } int *_imopVarPre205; int *_imopVarPre206; _imopVarPre205 = &(iv[n]); _imopVarPre206 = &(iv[0]); double *x_imopVarPre75; int *mark; int *nzloc; x_imopVarPre75 = v; mark = _imopVarPre206; nzloc = _imopVarPre205; int nrows; int i_imopVarPre76; int j; int jajp1; int nza; int k; int nzrow; double xi; nrows = lastrow - firstrow + 1; #pragma omp parallel default(shared) private(j) { #pragma omp for nowait for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = 0; } } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j++) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } #pragma omp parallel default(shared) private(k, j) { #pragma omp for nowait for (j = 0; j <= nrows - 1; j++) { for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { a[k] = 0.0; } } } for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } for (j = nrows; j >= 1; j--) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; nza = 0; #pragma omp parallel default(shared) private(i_imopVarPre76) { #pragma omp for nowait for (i_imopVarPre76 = 1; i_imopVarPre76 <= n; i_imopVarPre76++) { x_imopVarPre75[i_imopVarPre76] = 0.0; 
mark[i_imopVarPre76] = 0; } } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; for (k = jajp1; k < rowstr[j + 1]; k++) { i_imopVarPre76 = colidx[k]; x_imopVarPre75[i_imopVarPre76] = x_imopVarPre75[i_imopVarPre76] + a[k]; int _imopVarPre208; _imopVarPre208 = mark[i_imopVarPre76] == 0; if (_imopVarPre208) { _imopVarPre208 = x_imopVarPre75[i_imopVarPre76] != 0.0; } if (_imopVarPre208) { mark[i_imopVarPre76] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i_imopVarPre76; } } for (k = 1; k <= nzrow; k++) { i_imopVarPre76 = nzloc[k]; mark[i_imopVarPre76] = 0; xi = x_imopVarPre75[i_imopVarPre76]; x_imopVarPre75[i_imopVarPre76] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i_imopVarPre76; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } static void sparse(double a[], int colidx[], int rowstr[], int n, int arow[], int acol[], double aelt[], int firstrow, int lastrow, double x[], boolean mark[], int nzloc[], int nnza) { int nrows; int i; int j; int jajp1; int nza; int k; int nzrow; double xi; nrows = lastrow - firstrow + 1; #pragma omp parallel default(shared) private(j) { #pragma omp for nowait for (j = 1; j <= n; j++) { rowstr[j] = 0; mark[j] = 0; } } rowstr[n + 1] = 0; for (nza = 1; nza <= nnza; nza++) { j = (arow[nza] - firstrow + 1) + 1; rowstr[j] = rowstr[j] + 1; } rowstr[1] = 1; for (j = 2; j <= nrows + 1; j++) { rowstr[j] = rowstr[j] + rowstr[j - 1]; } #pragma omp parallel default(shared) private(k, j) { #pragma omp for nowait for (j = 0; j <= nrows - 1; j++) { for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k++) { a[k] = 0.0; } } } for (nza = 1; nza <= nnza; nza++) { j = arow[nza] - firstrow + 1; k = rowstr[j]; a[k] = aelt[nza]; colidx[k] = acol[nza]; rowstr[j] = rowstr[j] + 1; } for (j = nrows; j >= 1; j--) { rowstr[j + 1] = rowstr[j]; } rowstr[1] = 1; nza = 0; #pragma omp parallel default(shared) private(i) { #pragma omp for nowait for (i = 1; i <= n; i++) { x[i] = 0.0; mark[i] = 0; } } jajp1 = rowstr[1]; for (j = 1; j <= nrows; j++) { nzrow = 0; for (k = jajp1; k < rowstr[j + 1]; k++) { i = colidx[k]; x[i] = x[i] + a[k]; int _imopVarPre208; _imopVarPre208 = mark[i] == 0; if (_imopVarPre208) { _imopVarPre208 = x[i] != 0.0; } if (_imopVarPre208) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; } } for (k = 1; k <= nzrow; k++) { i = nzloc[k]; mark[i] = 0; xi = x[i]; x[i] = 0.0; if (xi != 0.0) { nza = nza + 1; a[nza] = xi; colidx[nza] = i; } } jajp1 = rowstr[j + 1]; rowstr[j + 1] = nza + rowstr[1]; } } static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[], int mark[]) { int nn1; int nzrow; int nzv; int ii; int i; double vecelt; double vecloc; nzv = 0; nzrow = 0; nn1 = 1; do { nn1 = 2 * nn1; } while (nn1 < n); while (nzv < nz) { double *_imopVarPre210; double _imopVarPre211; _imopVarPre210 = &tran; _imopVarPre211 = randlc(_imopVarPre210, amult); vecelt = _imopVarPre211; double *_imopVarPre213; double _imopVarPre214; _imopVarPre213 = &tran; _imopVarPre214 = randlc(_imopVarPre213, amult); vecloc = _imopVarPre214; int _imopVarPre216; _imopVarPre216 = icnvrt(vecloc, nn1); i = _imopVarPre216 + 1; if (i > n) { continue; } if (mark[i] == 0) { mark[i] = 1; nzrow = nzrow + 1; nzloc[nzrow] = i; nzv = nzv + 1; v[nzv] = vecelt; iv[nzv] = i; } } for (ii = 1; ii <= nzrow; ii++) { i = nzloc[ii]; mark[i] = 0; } } static int icnvrt(double x, int ipwr2) { return ((int)(ipwr2 * x)); } static void vecset(int n, double v[], int iv[], int *nzv, int i, double val) { int k; boolean set; set = 0; for (k = 1; k <= *nzv; k++) { if (iv[k] == i) { v[k] = val; set = 1; } 
} if (set == 0) { *nzv = *nzv + 1; v[*nzv] = val; iv[*nzv] = i; } }
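The OpenMP conj_grad above repeats one idiom throughout: a master block zeroes a shared scalar, an explicit barrier publishes the zero, and a worksharing loop with a reduction clause accumulates into it. A minimal sketch of that idiom under assumed names (dot is hypothetical):

#include <omp.h>

/* Reduction idiom from the OpenMP conj_grad above: master zeroes the
 * shared scalar, a barrier makes the zero visible to all threads, then
 * a worksharing loop reduces into it. */
static double dot(int n, const double r[])
{
    double rho = 0.0;
    #pragma omp parallel default(shared)
    {
        #pragma omp master
        { rho = 0.0; }        /* redundant here; mirrors the source */
        #pragma omp barrier   /* all threads now see rho == 0.0 */
        #pragma omp for reduction(+:rho) nowait
        for (int j = 1; j <= n; j++)
            rho += r[j] * r[j];
    }   /* implicit barrier of the parallel region completes the reduction */
    return rho;
}

The nowait on the reduction loop is safe only because the reduced value is next read after a barrier, here the implicit one closing the parallel region; the source code above relies on the same guarantee between its nowait loops and explicit barriers.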
GB_unop__minv_int8_int8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_int8_int8 // op(A') function: GB_unop_tran__minv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_int8_int8 // op(A') function: GB_unop_tran__minv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__minv_int8_int8 // op(A') function: GB_unop_tran__minv_int8_int8 // C type: int8_t // A type: int8_t // cast: int8_t cij = aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ int8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = aij ; \ Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__minv_int8_int8 ( int8_t *Cx, // Cx and Ax may be aliased const int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; int8_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 8) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__minv_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
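The only difference between the two formatted variants of this file is the parallel-for pragma on the apply loop. Stripped of its generator macros, the kernel reduces to the sketch below; apply_minv is a hypothetical name, and the placeholder body only stands in for GB_IMINV_SIGNED, whose exact division-by-zero semantics are defined by the real macro and merely guessed at here:

#include <stdint.h>

/* Elementwise unary apply, one embarrassingly parallel pass. */
static void apply_minv(int8_t *Cx, const int8_t *Ax, int64_t anz,
                       int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        /* stand-in for GB_IMINV_SIGNED (aij, 8); zero-handling assumed */
        Cx [p] = (aij == 0) ? 0 : (int8_t) (1 / aij) ;
    }
}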
parallel-reduction-nowait.c
/* * parallel-reduction-nowait.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0, i; int sum1 = 0; int sum2 = 0; // Number of threads is empirical: We need enough threads so that // the reduction is really performed hierarchically in the barrier! #pragma omp parallel num_threads(5) reduction(+ : var) { #pragma omp for schedule(static) nowait reduction(+ : sum1) for (i = 0; i < 5; i++) sum1 += i; #pragma omp for schedule(static) reduction(+ : sum2) for (i = 0; i < 5; i++) sum2 += i; var = sum1 + sum2; } fprintf(stderr, "DONE\n"); int error = (var != 100); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
/* * parallel-reduction-nowait.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0, i; int sum1 = 0; int sum2 = 0; // Number of threads is empirical: We need enough threads so that // the reduction is really performed hierarchically in the barrier! for (i = 0; i < 5; i++) sum1 += i; for (i = 0; i < 5; i++) sum2 += i; var = sum1 + sum2; fprintf(stderr, "DONE\n"); int error = (var != 100); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
/* * parallel-reduction-nowait.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0, i; int sum1 = 0; int sum2 = 0; // Number of threads is empirical: We need enough threads so that // the reduction is really performed hierarchically in the barrier! #pragma omp parallel num_threads(5) reduction(+ : var) { #pragma omp for schedule(static) nowait reduction(+ : sum1) for (i = 0; i < 5; i++) sum1 += i; #pragma omp for schedule(static) reduction(+ : sum2) for (i = 0; i < 5; i++) sum2 += i; var = sum1 + sum2; } fprintf(stderr, "DONE\n"); int error = (var != 100); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
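A note on the expected value in this test: each worksharing loop sums 0+1+2+3+4 = 10 into its reduction variable; the second loop's implicit barrier guarantees both reductions (including the nowait one) are complete; every one of the five threads then computes var = sum1 + sum2 = 20, and reduction(+:var) combines the five private copies into 5 * 20 = 100, hence the (var != 100) check. Outside the lit harness, a build along the following lines should work; the flags are an assumption, since the suite normally drives this through the %libarcher-compile-and-run substitution:

/* Assumed manual build (Archer runs OpenMP code under ThreadSanitizer):
 *
 *   clang -g -fopenmp -fsanitize=thread parallel-reduction-nowait.c -o test
 *   ./test    # prints DONE; a correct runtime triggers no TSan report
 */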
GB_unaryop__ainv_uint8_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint8_int16 // op(A') function: GB_tran__ainv_uint8_int16 // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint8_int16 ( uint8_t *Cx, // Cx and Ax may be aliased int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint8_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint8_int16 // op(A') function: GB_tran__ainv_uint8_int16 // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint8_int16 ( uint8_t *Cx, // Cx and Ax may be aliased int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint8_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint8_int16 // op(A') function: GB_tran__ainv_uint8_int16 // C type: uint8_t // A type: int16_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint8_int16 ( uint8_t *Cx, // Cx and Ax may be aliased int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint8_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
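The generated kernel above composes three macros per entry: GB_GETA loads the int16_t, GB_CASTING truncates it to uint8_t, and GB_OP negates. Expanded by hand into the serial body (ainv_apply is a hypothetical name):

#include <stdint.h>

/* GB_CAST_OP(p,p) from the kernel above, with the macros substituted. */
static void ainv_apply(uint8_t *Cx, const int16_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;          /* GB_GETA    */
        uint8_t z   = (uint8_t) aij ;   /* GB_CASTING */
        Cx [p]      = -z ;              /* GB_OP      */
    }
}

Since z is uint8_t, the negation in GB_OP wraps modulo 256 under C's unsigned arithmetic, which is the behavior the uint8/int16 typecast variant of AINV produces.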
GB_binop__plus_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_03__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int8) // A*D function (colscale): GB (_AxD__plus_int8) // D*A function (rowscale): GB (_DxB__plus_int8) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int8) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int8) // C=scalar+B GB (_bind1st__plus_int8) // C=scalar+B' GB (_bind1st_tran__plus_int8) // C=A+scalar GB (_bind2nd__plus_int8) // C=A'+scalar GB (_bind2nd_tran__plus_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_03__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int8) // A*D function (colscale): GB (_AxD__plus_int8) // D*A function (rowscale): GB (_DxB__plus_int8) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int8) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int8) // C=scalar+B GB (_bind1st__plus_int8) // C=scalar+B' GB (_bind1st_tran__plus_int8) // C=A+scalar GB (_bind2nd__plus_int8) // C=A'+scalar GB (_bind2nd_tran__plus_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_03__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int8) // A*D function (colscale): GB (_AxD__plus_int8) // D*A function (rowscale): GB (_DxB__plus_int8) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int8) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int8) // C=scalar+B GB (_bind1st__plus_int8) // C=scalar+B' GB (_bind1st_tran__plus_int8) // C=A+scalar GB (_bind2nd__plus_int8) // C=A'+scalar GB (_bind2nd_tran__plus_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
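The generated bind1st kernel above is the simplest place to see the pattern shared by all of these hard-coded operators: iterate over the entries, skip positions whose bitmap bit is clear, and apply the fused scalar operation. Below is a minimal standalone sketch of that pattern, not taken from the library; the function name is hypothetical, and the explicit NULL check stands in for the GBB (Bb, p) macro, which likewise treats a missing bitmap as "all entries present".

#include <stdint.h>
#include <stdio.h>

/* sketch of the bind1st loop shape: Cx [p] = x + Bx [p], honoring a bitmap */
static void bind1st_plus_int8_sketch(int8_t *Cx, int8_t x, const int8_t *Bx,
                                     const int8_t *Bb, int64_t anz, int nthreads) {
    int64_t p;
    /* same loop shape as the generated kernel; OpenMP privatizes p */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        if (Bb != NULL && !Bb[p]) continue;   /* stand-in for GBB (Bb, p) */
        Cx[p] = (int8_t) (x + Bx[p]);
    }
}

int main(void) {
    int8_t Bx[4] = {1, 2, 3, 4}, Cx[4] = {0};
    int8_t Bb[4] = {1, 0, 1, 1};              /* entry 1 is not present */
    bind1st_plus_int8_sketch(Cx, 10, Bx, Bb, 4, 1);
    printf("%d %d %d %d\n", Cx[0], Cx[1], Cx[2], Cx[3]);  /* 11 0 13 14 */
    return 0;
}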
GB_unop__atan_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__atan_fp64_fp64) // op(A') function: GB (_unop_tran__atan_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = atan (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atan (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = atan (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__atan_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__atan_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__atan_fp64_fp64) // op(A') function: GB (_unop_tran__atan_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = atan (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atan (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = atan (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__atan_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__atan_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__atan_fp64_fp64) // op(A') function: GB (_unop_tran__atan_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = atan (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atan (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = atan (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__atan_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__atan_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
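The unary kernels above are driven entirely by the GB_CAST_OP macro: cast aij to the C type, then apply the operator. A hedged illustration of that macro pattern outside the library (the SK_ name is invented for this sketch; with fp64 in and out, the cast step is a plain copy):

#include <math.h>
#include <stdio.h>

/* mirrors GB_CAST_OP for atan_fp64_fp64: cast, then apply the unary op */
#define SK_CAST_OP(Cx, pC, Ax, pA)                  \
    {                                               \
        double aij = (Ax)[pA];  /* aij = Ax [pA] */ \
        double z = aij;         /* the cast      */ \
        (Cx)[pC] = atan(z);     /* the operator  */ \
    }

int main(void) {
    double Ax[3] = {0.0, 1.0, -1.0};
    double Cx[3];
    for (long p = 0; p < 3; p++)
        SK_CAST_OP(Cx, p, Ax, p);
    printf("%f %f %f\n", Cx[0], Cx[1], Cx[2]);  /* 0, pi/4, -pi/4 */
    return 0;
}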
9014.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for for (t4 = 1; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 2; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 16) for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; for (t4 = 1; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); for (t4 = 0; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); for (t4 = 0; t4 <= nx - 2; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 16) for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for for (t4 = 1; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 1; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 16) for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for for (t4 = 0; t4 <= nx - 2; t4 += 128) for (t6 = t4; t6 <= (t4 + 127 < nx - 2 ? t4 + 127 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 16) for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
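In the kernel above, CHILL has tiled the row loop in blocks of 128 and the column loop in blocks of 16; each ternary such as (t4 + 127 < nx - 1 ? t4 + 127 : nx - 1) is just min(t4 + 127, nx - 1), clamping the last tile at the array edge, and the OpenMP pragmas parallelize over tile rows. A hedged driver sketch for calling the kernel (the arrays are file-scope statics, roughly 9.6 MB each, to avoid stack overflow; the source-term initialization is illustrative rather than taken from the PolyBench harness):

#include <stdio.h>

void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000][1200],
                    double ey[1000][1200], double hz[1000][1200],
                    double fict[500]);

/* static arrays are zero-initialized and live outside the stack */
static double ex[1000][1200], ey[1000][1200], hz[1000][1200], fict[500];

int main(void) {
    for (int t = 0; t < 500; t++)
        fict[t] = (double) t;                  /* illustrative source term */
    kernel_fdtd_2d(500, 1000, 1200, ex, ey, hz, fict);
    printf("hz[1][1] = %g\n", hz[1][1]);       /* spot-check one cell */
    return 0;
}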
rnn_helpers.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #ifdef _WIN32 #pragma warning(disable : 4267) #endif #include <algorithm> #include <functional> #include <future> #include <string> #include <vector> #include "gsl/span" #include "gsl/gsl_algorithm" #include "core/common/common.h" #include "core/common/logging/logging.h" #include "core/framework/allocator.h" #include "core/util/math.h" #include "core/util/math_cpuonly.h" #include "core/platform/threadpool.h" namespace onnxruntime { class Tensor; class OpKernelContext; namespace rnn { namespace detail { enum Direction { kForward = 0, kReverse = 1, kBidirectional = 2 }; inline Direction MakeDirection(const std::string& direction) { if (direction == "forward") { return kForward; } if (direction == "reverse") { return kReverse; } if (direction == "bidirectional") { return kBidirectional; } ORT_THROW("Invalid 'direction' argument of '", direction, "'. Must be one of 'forward', 'reverse', or 'bidirectional'."); } /** Allocate a unique_ptr using allocator_, and return a span to the allocated memory so usage is safe @param allocator IAllocator to use for the allocation. @param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'. @param unique_ptr unique_ptr that will control the lifetime of the allocated memory. @param fill If true, fill the allocated memory with fill_value. @param fill_value Value to use if 'fill' is true. @returns A span to provide bounds checked access to the allocated memory. */ template <typename TAlloc> gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator, size_t size, IAllocatorUniquePtr<TAlloc>& unique_ptr, bool fill = false, TAlloc fill_value = TAlloc{}) { unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size); auto span = gsl::make_span(unique_ptr.get(), size); if (fill) { // Don't use span.begin(); it causes a performance issue and stops the compiler from optimizing the code std::fill_n(unique_ptr.get(), size, fill_value); } return span; } // validate the common inputs to RNN, LSTM and GRU operators Status ValidateCommonRnnInputs(const Tensor& X, const Tensor& W, const Tensor& R, const Tensor* B, int WRB_dim_1_multipler, // multiplier used with hidden_size for W, R and B inputs const Tensor* sequence_lens, const Tensor* initial_h, int64_t num_directions, int64_t hidden_size); /// Copy an input array repeatedly to an output array /// @param input_begin Beginning of input /// @param input_end End of input /// @param output Output iterator /// @param repetitions Number of times to repeat copy. Assumes output is sufficiently sized. /// @returns Position of output iterator after copy is completed template <typename TInIter, typename TOutIter> TOutIter RepeatVectorToConstructArray(TInIter input_begin, TInIter input_end, TOutIter output, int64_t repetitions) { for (int64_t i = 0; i < repetitions; i++) { output = std::copy(input_begin, input_end, output); } return output; } // reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size] // and output to shape [seq_length, num_directions, batch_size, hidden_size] template <typename T> void ReverseSequence(gsl::span<const T> inputs, gsl::span<T> inputs_reverse, gsl::span<const int> sequence_lengths, const int max_sequence_length, const int batch_size, const int input_size, const int num_directions) { for (int i = 0; i < batch_size; i++) { int seq_len = sequence_lengths[i]; #ifdef USE_OPENMP // Execute the loop in parallel. 
#pragma omp parallel for #endif for (int j = 0; j < seq_len; j++) { gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size); gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size); // Use gsl::copy instead of std::copy() to allow the compiler to optimize the code gsl::copy(src, dest); } #ifdef USE_OPENMP // Execute the loop in parallel. #pragma omp parallel for #endif for (int j = seq_len; j < max_sequence_length; j++) { gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size); gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size); // Use gsl::copy instead of std::copy() to allow the compiler to optimize the code gsl::copy(src, dest); } } } // A has size M x K, B has size N x K (transposed), and C has size M x N // We check that A, B and C are large enough before calling the lower level GEMM implementation template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter> void ComputeGemm(const int M, const int N, const int K, const float alpha, TSpanAIter A, TSpanAIter A_end, const int lda, TSpanBIter B, TSpanBIter B_end, const int ldb, const float beta, TSpanCIter C, TSpanCIter C_end, const int ldc, concurrency::ThreadPool* tp) { // validate all the inputs // need to use the lda/ldb/ldc strides which should be >= the columns for the span ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N); ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end); ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end); ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end); ::onnxruntime::math::GemmEx<float>( CblasNoTrans, CblasTrans, M, N, K, alpha, &*A, lda, &*B, ldb, beta, &*C, ldc, tp); } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur, typename gsl::span<T>::const_iterator end, size_t size) { ORT_ENFORCE(cur + size <= end); return &*cur; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) { ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data() + offset; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> T* SafeRawPointer(typename gsl::span<T>::iterator cur, typename gsl::span<T>::iterator end, size_t size) { ORT_ENFORCE(cur + size <= end); return &*cur; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) { ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data() + offset; } template <typename TLambda> void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step, onnxruntime::concurrency::ThreadPool& ttp, const ::onnxruntime::logging::Logger& logger) { // #define NOTHREADS to execute the lambdas directly and in order if you need to do that to debug #ifdef NOTHREADS ORT_UNUSED_PARAMETER(ttp); ORT_UNUSED_PARAMETER(logger); for (int i = 0; i < max; i += step) { (void)name; std::bind(lambda, i)(); } #else ORT_UNUSED_PARAMETER(name); ORT_UNUSED_PARAMETER(logger); // ORT_ENFORCE 
may and does throw at times from within the tasks that run // on a thread-pool. Without propagating exceptions the process exits silently // which makes diagnosing bugs more difficult. // \! UGLY // The problem with the current thread-pool is that it takes std::function // by value and copies it more than once (even though it is movable). // // To report status and exceptions properly it's better to use // futures and promises, but they are not copyable, so we can't use a functor // with a promise member, and we are limited to C++11, where lambdas cannot capture by move. // // At the same time promises MUST live in the child thread, so that if we throw from the main thread // we don't destroy promises on the main thread's stack that child threads may still be using. // // The only solution that comes to mind with the current Eigen thread-pool is to use a shared_ptr to a std::promise. // const int total_tasks = max / (step > 0 ? step : 1) + (max % step > 0 ? 1 : 0); std::vector<std::future<void> > futures; futures.reserve(total_tasks); for (int i = 0, t = 0; i < max; i += step, ++t) { auto p_ptr = std::make_shared<std::promise<void> >(); futures.push_back(p_ptr->get_future()); ttp.Schedule([p_ptr, lambda, i]() { try { lambda(i); p_ptr->set_value(); } catch (...) { p_ptr->set_exception(std::current_exception()); } }); } // We'd like to wait until all of the tasks have finished // even though one or more may already have thrown. We store // the first exception and re-throw it at the end. std::exception_ptr pending_exception; for (auto& fut : futures) { try { // get() will re-throw any exception // the running task may have thrown fut.get(); } catch (...) { if (!pending_exception) { pending_exception = std::current_exception(); } } } if (pending_exception) { std::rethrow_exception(pending_exception); } #endif } void DumpMatrixImpl(const std::string& name, const float* src, int row, int col, int offset = 0, int col_width = -1); // Helper class to wrap the processing of the activation funcs and any alpha/beta values. // The alpha/beta values are consumed in the order of the activation funcs. Once they run out, // defaults are used as needed. // The Entries property contains the normalized function names and the alpha/beta value to use. 
class ActivationFuncs { public: struct Entry { const std::string name; const float alpha; const float beta; }; ActivationFuncs() = default; ActivationFuncs(const std::vector<std::string>& funcs, const std::vector<float>& alphas, const std::vector<float>& betas); const std::vector<Entry>& Entries() const { return entries_; } private: std::vector<Entry> entries_; }; namespace deepcpu { using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int); using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int); using ActivationFuncPtr = void (*)(float*, int, float, float); using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float); using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float); using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float); using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float); ActivationFuncPtr ActivationFuncByName(const std::string& func); LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func); GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func); GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func); void add_bias_into_ignore(const float* ignored, const float* pd, int c); void add_bias_into(const float* ps, float* pd, int c); void clip(float b, float* pd, int c); void clip_add_bias(float b, const float* pb, float* pd, int c); void clip_ignore_bias(float b, const float* pb, float* pd, int c); void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void sigmoid(float* pd, int c, float alpha, float beta); void tanh(float* pd, int c, float alpha, float beta); void relu(float* pd, int c, float alpha, float beta); void sigmoid_exact(float* pd, int c, float alpha, float beta); void tanh_exact(float* pd, int c, float alpha, float beta); void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr, int c); void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta); void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta); void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta); void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); inline void elementwise_product(const float* op1, const float* op2, float* dest, int size) { for (int i = 0; i < size; i++) dest[i] += op1[i] * op2[i]; } inline void elementwise_sum1(const float* src, float* dest, int size) { for (int i = 0; i < size; i++) dest[i] += src[i]; } inline void elementwise_sum2(const float* src1, const float* src2, float* dest, int size) { 
for (int i = 0; i < size; i++) dest[i] += src1[i] + src2[i]; } } // namespace deepcpu } // namespace detail } // namespace rnn } // namespace onnxruntime
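// [Editor's sketch] The exception-propagation pattern used by
// ExecuteLambdaInParallel above (shared_ptr-owned promises, drain all futures,
// re-throw the first stored exception) can be shown stand-alone. This is not
// onnxruntime code: std::thread stands in for the Eigen thread pool, and
// run_all_rethrow_first() is a hypothetical name.
#include <functional>
#include <future>
#include <memory>
#include <thread>
#include <vector>

inline void run_all_rethrow_first(const std::vector<std::function<void()>>& tasks) {
  std::vector<std::future<void>> futures;
  std::vector<std::thread> workers;
  futures.reserve(tasks.size());
  for (const auto& task : tasks) {
    // The promise is heap-allocated and shared with the worker, so it cannot be
    // destroyed while the worker may still set a value or an exception on it.
    auto p = std::make_shared<std::promise<void>>();
    futures.push_back(p->get_future());
    workers.emplace_back([p, task]() {
      try { task(); p->set_value(); }
      catch (...) { p->set_exception(std::current_exception()); }
    });
  }
  std::exception_ptr first;
  for (auto& f : futures) {
    try { f.get(); }  // get() re-throws whatever the task stored
    catch (...) { if (!first) first = std::current_exception(); }
  }
  for (auto& w : workers) w.join();
  if (first) std::rethrow_exception(first);
}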
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #ifdef _WIN32 #pragma warning(disable : 4267) #endif #include <algorithm> #include <functional> #include <future> #include <string> #include <vector> #include "gsl/span" #include "gsl/gsl_algorithm" #include "core/common/common.h" #include "core/common/logging/logging.h" #include "core/framework/allocator.h" #include "core/util/math.h" #include "core/util/math_cpuonly.h" #include "core/platform/threadpool.h" namespace onnxruntime { class Tensor; class OpKernelContext; namespace rnn { namespace detail { enum Direction { kForward = 0, kReverse = 1, kBidirectional = 2 }; inline Direction MakeDirection(const std::string& direction) { if (direction == "forward") { return kForward; } if (direction == "reverse") { return kReverse; } if (direction == "bidirectional") { return kBidirectional; } ORT_THROW("Invalid 'direction' argument of '", direction, "'. Must be one of 'forward', 'reverse', or 'bidirectional'."); } /** Allocate a unique_ptr using the given allocator, and return a span over the allocated memory so usage is bounds-safe @param allocator IAllocator to use for the allocation. @param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'. @param unique_ptr unique_ptr that will control the lifetime of the allocated memory. @param fill If true, fill the allocated memory with fill_value. @param fill_value Value to use if 'fill' is true. @returns A span to provide bounds checked access to the allocated memory. */ template <typename TAlloc> gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator, size_t size, IAllocatorUniquePtr<TAlloc>& unique_ptr, bool fill = false, TAlloc fill_value = TAlloc{}) { unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size); auto span = gsl::make_span(unique_ptr.get(), size); if (fill) { // Don't use span.begin(); it causes a performance issue and stops the compiler from optimizing the code std::fill_n(unique_ptr.get(), size, fill_value); } return span; } // validate the common inputs to RNN, LSTM and GRU operators Status ValidateCommonRnnInputs(const Tensor& X, const Tensor& W, const Tensor& R, const Tensor* B, int WRB_dim_1_multipler, // multiplier used with hidden_size for W, R and B inputs const Tensor* sequence_lens, const Tensor* initial_h, int64_t num_directions, int64_t hidden_size); /// Copy an input array repeatedly to an output array /// @param input_begin Beginning of input /// @param input_end End of input /// @param output Output iterator /// @param repetitions Number of times to repeat the copy. Assumes output is sufficiently sized.
/// @returns Position of output iterator after copy is completed template <typename TInIter, typename TOutIter> TOutIter RepeatVectorToConstructArray(TInIter input_begin, TInIter input_end, TOutIter output, int64_t repetitions) { for (int64_t i = 0; i < repetitions; i++) { output = std::copy(input_begin, input_end, output); } return output; } // reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size] // and output to shape [seq_length, num_directions, batch_size, hidden_size] template <typename T> void ReverseSequence(gsl::span<const T> inputs, gsl::span<T> inputs_reverse, gsl::span<const int> sequence_lengths, const int max_sequence_length, const int batch_size, const int input_size, const int num_directions) { for (int i = 0; i < batch_size; i++) { int seq_len = sequence_lengths[i]; for (int j = 0; j < seq_len; j++) { gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size); gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size); // Use gsl::copy instead of std::copy() to allow compiler to optimize the code gsl::copy(src, dest); } for (int j = seq_len; j < max_sequence_length; j++) { gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size); gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size); // Use gsl::copy instead of std::copy() to allow compiler to optimize the code gsl::copy(src, dest); } } } // A has size M x K, B has size N x K (transposed), and C has size M x N // We check that A, B and C are large enough before calling the lower level GEMM implementation template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter> void ComputeGemm(const int M, const int N, const int K, const float alpha, TSpanAIter A, TSpanAIter A_end, const int lda, TSpanBIter B, TSpanBIter B_end, const int ldb, const float beta, TSpanCIter C, TSpanCIter C_end, const int ldc, concurrency::ThreadPool* tp) { // validate all the inputs // need to use the lda/ldb/ldc strides which should be >= the columns for the span ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N); ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end); ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end); ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end); ::onnxruntime::math::GemmEx<float>( CblasNoTrans, CblasTrans, M, N, K, alpha, &*A, lda, &*B, ldb, beta, &*C, ldc, tp); } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur, typename gsl::span<T>::const_iterator end, size_t size) { ORT_ENFORCE(cur + size <= end); return &*cur; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) { ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data(); } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> T* SafeRawPointer(typename gsl::span<T>::iterator cur, typename gsl::span<T>::iterator end, size_t size) { ORT_ENFORCE(cur + size <= end); return &*cur; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required 
template <typename T> T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) { ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data() + offset; } template <typename TLambda> void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step, onnxruntime::concurrency::ThreadPool& ttp, const ::onnxruntime::logging::Logger& logger) { // #define NOTHREADS to execute the lambdas directly and in order, which can be useful when debugging #ifdef NOTHREADS ORT_UNUSED_PARAMETER(ttp); ORT_UNUSED_PARAMETER(logger); for (int i = 0; i < max; i += step) { (void)name; std::bind(lambda, i)(); } #else ORT_UNUSED_PARAMETER(name); ORT_UNUSED_PARAMETER(logger); // ORT_ENFORCE may and does throw at times from within the tasks that run // on a thread-pool. Without propagating exceptions, the process exits silently, // which makes diagnosing bugs more difficult. // \! UGLY // The problem with the current thread-pool is that it takes std::function // by value and copies it more than once (even though it is movable). // // To report status and exceptions properly it would be better to use // futures and promises, but they are not copyable, so we cannot build a functor // with a promise member; and since we are limited to C++11, lambdas cannot move-capture either. // // At the same time the promises MUST NOT live on the main thread's stack: if we throw from the main thread // we must not destroy promises that child threads may still be using. // // The only solution that comes to mind with the current Eigen thread-pool is to hold each std::promise via a shared_ptr. // const int safe_step = (step > 0 ? step : 1); const int total_tasks = max / safe_step + (max % safe_step > 0 ? 1 : 0); std::vector<std::future<void> > futures; futures.reserve(total_tasks); for (int i = 0, t = 0; i < max; i += step, ++t) { auto p_ptr = std::make_shared<std::promise<void> >(); futures.push_back(p_ptr->get_future()); ttp.Schedule([p_ptr, lambda, i]() { try { lambda(i); p_ptr->set_value(); } catch (...) { p_ptr->set_exception(std::current_exception()); } }); } // We want to wait until all of the tasks have finished, // even if one or more have already thrown. We store // the first exception and re-throw it at the end. std::exception_ptr pending_exception; for (auto& fut : futures) { try { // get() re-throws any exception // the running task may have thrown fut.get(); } catch (...) { if (!pending_exception) { pending_exception = std::current_exception(); } } } if (pending_exception) { std::rethrow_exception(pending_exception); } #endif } void DumpMatrixImpl(const std::string& name, const float* src, int row, int col, int offset = 0, int col_width = -1); // Helper class to wrap the processing of the activation funcs and any alpha/beta values. // The alpha/beta values are consumed in the order of the activation funcs. Once they run out, // defaults will be used as needed. // The Entries property contains the normalized function names and the alpha/beta values to use.
class ActivationFuncs { public: struct Entry { const std::string name; const float alpha; const float beta; }; ActivationFuncs() = default; ActivationFuncs(const std::vector<std::string>& funcs, const std::vector<float>& alphas, const std::vector<float>& betas); const std::vector<Entry>& Entries() const { return entries_; } private: std::vector<Entry> entries_; }; namespace deepcpu { using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int); using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int); using ActivationFuncPtr = void (*)(float*, int, float, float); using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float); using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float); using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float); using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float); ActivationFuncPtr ActivationFuncByName(const std::string& func); LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func); GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func); GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func); void add_bias_into_ignore(const float* ignored, const float* pd, int c); void add_bias_into(const float* ps, float* pd, int c); void clip(float b, float* pd, int c); void clip_add_bias(float b, const float* pb, float* pd, int c); void clip_ignore_bias(float b, const float* pb, float* pd, int c); void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void sigmoid(float* pd, int c, float alpha, float beta); void tanh(float* pd, int c, float alpha, float beta); void relu(float* pd, int c, float alpha, float beta); void sigmoid_exact(float* pd, int c, float alpha, float beta); void tanh_exact(float* pd, int c, float alpha, float beta); void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr, int c); void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta); void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta); void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta); void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); inline void elementwise_product(const float* op1, const float* op2, float* dest, int size) { for (int i = 0; i < size; i++) dest[i] += op1[i] * op2[i]; } inline void elementwise_sum1(const float* src, float* dest, int size) { for (int i = 0; i < size; i++) dest[i] += src[i]; } inline void elementwise_sum2(const float* src1, const float* src2, float* dest, int size) { 
for (int i = 0; i < size; i++) dest[i] += src1[i] + src2[i]; } } // namespace deepcpu } // namespace detail } // namespace rnn } // namespace onnxruntime
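// [Editor's sketch] A stand-alone check of the ReverseSequence() index
// arithmetic above, with gsl::span replaced by raw pointers so the snippet
// compiles on its own. Valid time steps are written in reverse order; padding
// steps past seq_len keep their original positions. Not onnxruntime code.
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  const int max_seq = 3, batch = 2, input = 2, num_dir = 1;
  const std::vector<int> seq_lens = {3, 2};
  std::vector<float> in(max_seq * batch * input);
  std::vector<float> out(max_seq * num_dir * batch * input, 0.f);
  for (size_t n = 0; n < in.size(); ++n) in[n] = static_cast<float>(n);
  for (int i = 0; i < batch; ++i) {
    const int seq_len = seq_lens[i];
    for (int j = 0; j < seq_len; ++j) {  // reversed copy of the valid steps
      const float* src = &in[j * batch * input + i * input];
      std::copy(src, src + input,
                &out[num_dir * (seq_len - j - 1) * batch * input + i * input]);
    }
    for (int j = seq_len; j < max_seq; ++j) {  // padding stays in place
      const float* src = &in[j * batch * input + i * input];
      std::copy(src, src + input, &out[num_dir * j * batch * input + i * input]);
    }
  }
  assert(out[0] == in[2 * batch * input]);  // batch 0: output t=0 == input t=2
  return 0;
}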
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #ifdef _WIN32 #pragma warning(disable : 4267) #endif #include <algorithm> #include <functional> #include <future> #include <string> #include <vector> #include "gsl/span" #include "gsl/gsl_algorithm" #include "core/common/common.h" #include "core/common/logging/logging.h" #include "core/framework/allocator.h" #include "core/util/math.h" #include "core/util/math_cpuonly.h" #include "core/platform/threadpool.h" namespace onnxruntime { class Tensor; class OpKernelContext; namespace rnn { namespace detail { enum Direction { kForward = 0, kReverse = 1, kBidirectional = 2 }; inline Direction MakeDirection(const std::string& direction) { if (direction == "forward") { return kForward; } if (direction == "reverse") { return kReverse; } if (direction == "bidirectional") { return kBidirectional; } ORT_THROW("Invalid 'direction' argument of '", direction, "'. Must be one of 'forward', 'reverse', or 'bidirectional'."); } /** Allocate a unique_ptr using the given allocator, and return a span over the allocated memory so usage is bounds-safe @param allocator IAllocator to use for the allocation. @param size Allocation size. Number of elements of type TAlloc, or total size if TAlloc is 'void'. @param unique_ptr unique_ptr that will control the lifetime of the allocated memory. @param fill If true, fill the allocated memory with fill_value. @param fill_value Value to use if 'fill' is true. @returns A span to provide bounds checked access to the allocated memory. */ template <typename TAlloc> gsl::span<TAlloc> Allocate(std::shared_ptr<IAllocator> allocator, size_t size, IAllocatorUniquePtr<TAlloc>& unique_ptr, bool fill = false, TAlloc fill_value = TAlloc{}) { unique_ptr = IAllocator::MakeUniquePtr<TAlloc>(allocator, size); auto span = gsl::make_span(unique_ptr.get(), size); if (fill) { // Don't use span.begin(); it causes a performance issue and stops the compiler from optimizing the code std::fill_n(unique_ptr.get(), size, fill_value); } return span; } // validate the common inputs to RNN, LSTM and GRU operators Status ValidateCommonRnnInputs(const Tensor& X, const Tensor& W, const Tensor& R, const Tensor* B, int WRB_dim_1_multipler, // multiplier used with hidden_size for W, R and B inputs const Tensor* sequence_lens, const Tensor* initial_h, int64_t num_directions, int64_t hidden_size); /// Copy an input array repeatedly to an output array /// @param input_begin Beginning of input /// @param input_end End of input /// @param output Output iterator /// @param repetitions Number of times to repeat the copy. Assumes output is sufficiently sized. /// @returns Position of output iterator after copy is completed template <typename TInIter, typename TOutIter> TOutIter RepeatVectorToConstructArray(TInIter input_begin, TInIter input_end, TOutIter output, int64_t repetitions) { for (int64_t i = 0; i < repetitions; i++) { output = std::copy(input_begin, input_end, output); } return output; } // reverse an LSTM or GRU sequence which has shape [seq_length, batch_size, hidden_size] // and output to shape [seq_length, num_directions, batch_size, hidden_size] template <typename T> void ReverseSequence(gsl::span<const T> inputs, gsl::span<T> inputs_reverse, gsl::span<const int> sequence_lengths, const int max_sequence_length, const int batch_size, const int input_size, const int num_directions) { for (int i = 0; i < batch_size; i++) { int seq_len = sequence_lengths[i]; #ifdef USE_OPENMP // Execute the loop in parallel.
#pragma omp parallel for #endif for (int j = 0; j < seq_len; j++) { gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size); gsl::span<T> dest = inputs_reverse.subspan(num_directions * (seq_len - j - 1) * batch_size * input_size + i * input_size, input_size); // Use gsl::copy instead of std::copy() to allow the compiler to optimize the code gsl::copy(src, dest); } #ifdef USE_OPENMP // Execute the loop in parallel. #pragma omp parallel for #endif for (int j = seq_len; j < max_sequence_length; j++) { gsl::span<const T> src = inputs.subspan(j * batch_size * input_size + i * input_size, input_size); gsl::span<T> dest = inputs_reverse.subspan(num_directions * j * batch_size * input_size + i * input_size, input_size); // Use gsl::copy instead of std::copy() to allow the compiler to optimize the code gsl::copy(src, dest); } } } // A has size M x K, B has size N x K (transposed), and C has size M x N // We check that A, B and C are large enough before calling the lower level GEMM implementation template <typename TSpanAIter, typename TSpanBIter, typename TSpanCIter> void ComputeGemm(const int M, const int N, const int K, const float alpha, TSpanAIter A, TSpanAIter A_end, const int lda, TSpanBIter B, TSpanBIter B_end, const int ldb, const float beta, TSpanCIter C, TSpanCIter C_end, const int ldc, concurrency::ThreadPool* tp) { // validate all the inputs // need to use the lda/ldb/ldc strides which should be >= the columns for the span ORT_ENFORCE(lda >= K && ldb >= K && ldc >= N); ORT_ENFORCE(A + (M * lda - (lda - K)) <= A_end); ORT_ENFORCE(B + (N * ldb - (ldb - K)) <= B_end); ORT_ENFORCE(C + (M * ldc - (ldc - N)) <= C_end); ::onnxruntime::math::GemmEx<float>( CblasNoTrans, CblasTrans, M, N, K, alpha, &*A, lda, &*B, ldb, beta, &*C, ldc, tp); } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> const T* SafeRawConstPointer(typename gsl::span<T>::const_iterator cur, typename gsl::span<T>::const_iterator end, size_t size) { ORT_ENFORCE(cur + size <= end); return &*cur; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> const T* SafeRawConstPointer(gsl::span<T> span, size_t offset, size_t size) { ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data(); } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> T* SafeRawPointer(typename gsl::span<T>::iterator cur, typename gsl::span<T>::iterator end, size_t size) { ORT_ENFORCE(cur + size <= end); return &*cur; } // helper to convert a span to a raw pointer // after validating the memory covered by the span supports the size required template <typename T> T* SafeRawPointer(typename gsl::span<T> span, size_t offset, size_t size) { ORT_ENFORCE(offset + size <= size_t(span.size())); return span.data() + offset; } template <typename TLambda> void ExecuteLambdaInParallel(const std::string& name, TLambda lambda, int max, int step, onnxruntime::concurrency::ThreadPool& ttp, const ::onnxruntime::logging::Logger& logger) { // #define NOTHREADS to execute the lambdas directly and in order, which can be useful when debugging #ifdef NOTHREADS ORT_UNUSED_PARAMETER(ttp); ORT_UNUSED_PARAMETER(logger); for (int i = 0; i < max; i += step) { (void)name; std::bind(lambda, i)(); } #else ORT_UNUSED_PARAMETER(name); ORT_UNUSED_PARAMETER(logger); // ORT_ENFORCE
may and does throw at times from within the tasks that run // on a thread-pool. Without propagating exceptions, the process exits silently, // which makes diagnosing bugs more difficult. // \! UGLY // The problem with the current thread-pool is that it takes std::function // by value and copies it more than once (even though it is movable). // // To report status and exceptions properly it would be better to use // futures and promises, but they are not copyable, so we cannot build a functor // with a promise member; and since we are limited to C++11, lambdas cannot move-capture either. // // At the same time the promises MUST NOT live on the main thread's stack: if we throw from the main thread // we must not destroy promises that child threads may still be using. // // The only solution that comes to mind with the current Eigen thread-pool is to hold each std::promise via a shared_ptr. // const int safe_step = (step > 0 ? step : 1); const int total_tasks = max / safe_step + (max % safe_step > 0 ? 1 : 0); std::vector<std::future<void> > futures; futures.reserve(total_tasks); for (int i = 0, t = 0; i < max; i += step, ++t) { auto p_ptr = std::make_shared<std::promise<void> >(); futures.push_back(p_ptr->get_future()); ttp.Schedule([p_ptr, lambda, i]() { try { lambda(i); p_ptr->set_value(); } catch (...) { p_ptr->set_exception(std::current_exception()); } }); } // We want to wait until all of the tasks have finished, // even if one or more have already thrown. We store // the first exception and re-throw it at the end. std::exception_ptr pending_exception; for (auto& fut : futures) { try { // get() re-throws any exception // the running task may have thrown fut.get(); } catch (...) { if (!pending_exception) { pending_exception = std::current_exception(); } } } if (pending_exception) { std::rethrow_exception(pending_exception); } #endif } void DumpMatrixImpl(const std::string& name, const float* src, int row, int col, int offset = 0, int col_width = -1); // Helper class to wrap the processing of the activation funcs and any alpha/beta values. // The alpha/beta values are consumed in the order of the activation funcs. Once they run out, // defaults will be used as needed. // The Entries property contains the normalized function names and the alpha/beta values to use.
class ActivationFuncs { public: struct Entry { const std::string name; const float alpha; const float beta; }; ActivationFuncs() = default; ActivationFuncs(const std::vector<std::string>& funcs, const std::vector<float>& alphas, const std::vector<float>& betas); const std::vector<Entry>& Entries() const { return entries_; } private: std::vector<Entry> entries_; }; namespace deepcpu { using AddBiasIntoFuncPtr = void (*)(const float*, float*, const int); using ClipWithBiasFuncPtr = void (*)(float, const float*, float*, const int); using ActivationFuncPtr = void (*)(float*, int, float, float); using ActivationFuncBPtr = void (*)(const float*, float*, int, float, float); using LstmMergeGatesFuncPtr = void (*)(const float*, float*, const float*, float*, int, float, float); using GruResetGateFuncPtr = void (*)(const float*, float*, float*, int, float, float); using GruOutputGateFuncPtr = void (*)(float*, const float*, const float*, float*, int, float, float); ActivationFuncPtr ActivationFuncByName(const std::string& func); LstmMergeGatesFuncPtr LstmMergeGatesFuncByName(const std::string& func); GruResetGateFuncPtr GruResetGateFuncByName(const std::string& func); GruOutputGateFuncPtr GruOutputGateFuncByName(const std::string& func); void add_bias_into_ignore(const float* ignored, const float* pd, int c); void add_bias_into(const float* ps, float* pd, int c); void clip(float b, float* pd, int c); void clip_add_bias(float b, const float* pb, float* pd, int c); void clip_ignore_bias(float b, const float* pb, float* pd, int c); void sigmoid_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void tanh_m(const float* ps1, float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void relu_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void sigmoid_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void tanh_exact_m(const float* ps1, const float* ps1_c, const float* ps2, float* pd, int c, float alpha, float beta); void sigmoid(float* pd, int c, float alpha, float beta); void tanh(float* pd, int c, float alpha, float beta); void relu(float* pd, int c, float alpha, float beta); void sigmoid_exact(float* pd, int c, float alpha, float beta); void tanh_exact(float* pd, int c, float alpha, float beta); void merge_lstm_gates_to_memory(const float* pprev, const float* pi, const float* pf, const float* pg, float* pcurr, int c); void gru_reset_gate_tanh(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta); void gru_reset_gate_sigmoid(const float* ps1, float* ps2, float* pd, int c, float alpha, float beta); void gru_reset_gate_relu(const float* ps1, const float* ps2, float* pd, int c, float alpha, float beta); void gru_output_gate_tanh(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); void gru_output_gate_sigmoid(float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); void gru_output_gate_relu(const float* ph, const float* pz, const float* ps, float* po, int c, float alpha, float beta); inline void elementwise_product(const float* op1, const float* op2, float* dest, int size) { for (int i = 0; i < size; i++) dest[i] += op1[i] * op2[i]; } inline void elementwise_sum1(const float* src, float* dest, int size) { for (int i = 0; i < size; i++) dest[i] += src[i]; } inline void elementwise_sum2(const float* src1, const float* src2, float* dest, int size) { 
for (int i = 0; i < size; i++) dest[i] += src1[i] + src2[i]; } } // namespace deepcpu } // namespace detail } // namespace rnn } // namespace onnxruntime
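// [Editor's sketch] The USE_OPENMP guard used in the ReverseSequence() copy
// loops above follows a common pattern: emit the pragma only when the build
// enables OpenMP, and keep the loop body free of cross-iteration dependencies
// (every iteration writes a disjoint slice). A minimal stand-alone example of
// the same guard; scale_rows() is a hypothetical name, not onnxruntime code.
#ifdef USE_OPENMP
#include <omp.h>
#endif
#include <vector>

void scale_rows(std::vector<float>& data, int rows, int cols, float s) {
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
  for (int r = 0; r < rows; ++r)  // rows are independent, so this is race-free
    for (int c = 0; c < cols; ++c)
      data[r * cols + c] *= s;
}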
dragonfly4_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * based on rawSHA256_fmt.c code * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See * http://www.openwall.com/lists/john-dev/2012/01/16/1 * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_dragonfly4_32; extern struct fmt_main fmt_dragonfly4_64; #elif FMT_REGISTERS_H john_register_one(&fmt_dragonfly4_32); john_register_one(&fmt_dragonfly4_64); #else #include "sha2.h" #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #ifdef _OPENMP #ifndef OMP_SCALE #define OMP_SCALE 2048 // tuned on K8-dual HT #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL_32 "dragonfly4-32" #define FORMAT_LABEL_64 "dragonfly4-64" #define FORMAT_NAME_32 "DragonFly BSD $4$ w/ bugs, 32-bit" #define FORMAT_NAME_64 "DragonFly BSD $4$ w/ bugs, 64-bit" #define FORMAT_TAG "$4$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #if ARCH_BITS >= 64 #define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB #else #define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 84 #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define USED_BINARY_SIZE 62 // Due to base64 bug in DragonBSD crypt-sha512.c #define SALT_SIZE_32 (1+4+8) // 1st char is length #define SALT_SIZE_64 (1+8+8) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests_32[] = { {"$4$7E48ul$K4u43llx1P184KZBoILl2hnFLBHj6.486TtxWA.EA1pLZuQS7P5k0LQqyEULux47.5vttDbSo/Cbpsez.AUI", "magnum"}, {"$4$Hz$5U1s18ntUYE24mF3JN44BYZPN34HBCMw57.Yw2JeKoiBkTVSGBDZEPT325hvR7iw8QYHy9kG7WUW8LCM.6UD", ""}, {"$4$W$79ddF.iDXVPcf/uf8bMFl15leilo1GE8C2KnEAWs3isK930rVy1EZZS2veHgU17NRt4qpKTtZRCA.QC7.68j", "password"}, {"$4$dw7uRHW$Cs6rbZqAVEEp9dsYOl4w/U84YydqdsEYyxHNvAtd2bcLz2Eem9L7FI/aGD2ayAybmprtYZLq2AtdXBio.cX0", "John the Ripper"}, {"$4$2tgCi76D$zy7ms.v1Y8HcsasTaR8n/Ng8GH4dhPv4ozihbM4JMNSJUmw7wVKbcqksefn7nVT.WrN18fV8i1yh7Gmq.cXC", "DragonFly BSD"}, {NULL} }; static struct fmt_tests tests_64[] = { {"$4$7E48ul$9or6.L/T.iChtPIGY4.vIgdYEmMkTW7Ru4OJxtGJtonCQo.wu3.bS4UPlUc2B8CAfGo1Oi5PgQvfhzNQ.A8v", "magnum"}, {"$4$Hz$Mujq0GrjuRtPhcM/0rOfbr2l9fXGfVwKAuL9oL5IH.RnOO1zcgG/S6rSIrebK4g0BEgKGKc0zmWpnk3O..uR", ""}, {"$4$W$.eHqh7OeyhVkBG0lCuUFnEShQq3tZt1QOLUx/9vIt3p56rUMCu2w7iQof7HwWa1pJwcBpPG.7KK3Pcce.oFX", "password"}, {"$4$dw7uRHW$17b2EzV3m0ziCLQoSKzUElTVgkL7cHXQzZzeeuNnkee/bchs0VHGqzjXrMZtWVfK2OW8.GfHvtZgzqGF.IUZ", "John the Ripper"}, {"$4$2tgCi76D$NL8CBWreQkoaVeGVL/a27ZrwYq6M8mlNt.uqc9E9.OiANu6JHdQy2r6J4uAZuD7wKqAQier1YVL7M0IF.gvi", "DragonFly BSD"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out) [(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)]; static char *cur_salt; static int salt_len; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, 
sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *start; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ciphertext += FORMAT_TAG_LEN; for (pos = ciphertext; *pos && *pos != '$'; pos++); if (!*pos || pos < ciphertext || pos > &ciphertext[8]) return 0; start = ++pos; while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++; if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0; return 1; } #define TO_BINARY(b1, b2, b3) \ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out[b1] = value >> 16; \ out[b2] = value >> 8; \ out[b3] = value; // Don't copy this code without realising it mimics bugs in the original code! // We are actually missing the last 16 bits with this implementation. static void *get_binary(char *ciphertext) { static uint32_t outbuf[BINARY_SIZE/4]; uint32_t value; char *pos; unsigned char *out = (unsigned char*)outbuf; int i; memset(outbuf, 0, sizeof(outbuf)); pos = strrchr(ciphertext, '$') + 1; for (i = 0; i < 20; i++) { TO_BINARY(i, i + 21, i + 42); } value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); out[20] = value >> 16; out[41] = value >> 8; return (void *)out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { SHA512_CTX ctx; SHA512_Init(&ctx); /* First the password */ SHA512_Update(&ctx, saved_key[index], saved_len[index]); /* Then the salt, including the $4$ magic */ SHA512_Update(&ctx, cur_salt, salt_len); SHA512_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static void set_salt(void *salt) { salt_len = (int)*(char*)salt; cur_salt = (char*)salt + 1; } // For 32-bit version of the bug, our magic is "$4$\0" static void *get_salt_32(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_32); ciphertext += FORMAT_TAG_LEN; strcpy(&out[1], FORMAT_TAG); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[5], ciphertext, len); out[0] = len + 4; return out; } // For 64-bit version of the bug, our magic is "$4$\0/etc" static void *get_salt_64(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_64); ciphertext 
+= FORMAT_TAG_LEN; memcpy(&out[1], "$4$\0/etc", 8); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[9], ciphertext, len); out[0] = len + 8; return out; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], USED_BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], USED_BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash function by DJ Bernstein static int salt_hash(void *salt) { unsigned char *s = (unsigned char*)salt + 1; unsigned int hash = 5381; unsigned int i; for (i = 0; i < *(unsigned char*)salt; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_dragonfly4_32 = { { FORMAT_LABEL_32, FORMAT_NAME_32, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, USED_BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_32, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests_32 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_32, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_dragonfly4_64 = { { FORMAT_LABEL_64, FORMAT_NAME_64, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_64, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { NULL }, tests_64 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_64, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
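/* [Editor's sketch] The TO_BINARY layout above, spelled out stand-alone. Each
 * group of four base64 characters decodes to a 24-bit value whose three bytes
 * land at out[i], out[i+21] and out[i+42]; the final (21st) group only stores
 * the first two bytes, so the last 16 bits of the SHA-512 digest are never
 * compared -- hence USED_BINARY_SIZE of 62. decode_group() and place_group()
 * are hypothetical helpers; d[] holds the 6-bit values that the atoi64[]
 * lookups would produce. */
#include <stdint.h>

static uint32_t decode_group(const unsigned char d[4]) {
    return (uint32_t)d[0] | ((uint32_t)d[1] << 6) |
           ((uint32_t)d[2] << 12) | ((uint32_t)d[3] << 18);
}

static void place_group(uint32_t value, unsigned char *out, int i) {
    out[i]      = (unsigned char)(value >> 16);
    out[i + 21] = (unsigned char)(value >> 8);
    if (i < 20)                      /* the last group drops this byte */
        out[i + 42] = (unsigned char)value;
}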
#if FMT_EXTERNS_H extern struct fmt_main fmt_dragonfly4_32; extern struct fmt_main fmt_dragonfly4_64; #elif FMT_REGISTERS_H john_register_one(&fmt_dragonfly4_32); john_register_one(&fmt_dragonfly4_64); #else #include "sha2.h" #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "memdbg.h" #define FORMAT_LABEL_32 "dragonfly4-32" #define FORMAT_LABEL_64 "dragonfly4-64" #define FORMAT_NAME_32 "DragonFly BSD $4$ w/ bugs, 32-bit" #define FORMAT_NAME_64 "DragonFly BSD $4$ w/ bugs, 64-bit" #define FORMAT_TAG "$4$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #if ARCH_BITS >= 64 #define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB #else #define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 84 #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define USED_BINARY_SIZE 62 // Due to base64 bug in DragonBSD crypt-sha512.c #define SALT_SIZE_32 (1+4+8) // 1st char is length #define SALT_SIZE_64 (1+8+8) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests_32[] = { {"$4$7E48ul$K4u43llx1P184KZBoILl2hnFLBHj6.486TtxWA.EA1pLZuQS7P5k0LQqyEULux47.5vttDbSo/Cbpsez.AUI", "magnum"}, {"$4$Hz$5U1s18ntUYE24mF3JN44BYZPN34HBCMw57.Yw2JeKoiBkTVSGBDZEPT325hvR7iw8QYHy9kG7WUW8LCM.6UD", ""}, {"$4$W$79ddF.iDXVPcf/uf8bMFl15leilo1GE8C2KnEAWs3isK930rVy1EZZS2veHgU17NRt4qpKTtZRCA.QC7.68j", "password"}, {"$4$dw7uRHW$Cs6rbZqAVEEp9dsYOl4w/U84YydqdsEYyxHNvAtd2bcLz2Eem9L7FI/aGD2ayAybmprtYZLq2AtdXBio.cX0", "John the Ripper"}, {"$4$2tgCi76D$zy7ms.v1Y8HcsasTaR8n/Ng8GH4dhPv4ozihbM4JMNSJUmw7wVKbcqksefn7nVT.WrN18fV8i1yh7Gmq.cXC", "DragonFly BSD"}, {NULL} }; static struct fmt_tests tests_64[] = { {"$4$7E48ul$9or6.L/T.iChtPIGY4.vIgdYEmMkTW7Ru4OJxtGJtonCQo.wu3.bS4UPlUc2B8CAfGo1Oi5PgQvfhzNQ.A8v", "magnum"}, {"$4$Hz$Mujq0GrjuRtPhcM/0rOfbr2l9fXGfVwKAuL9oL5IH.RnOO1zcgG/S6rSIrebK4g0BEgKGKc0zmWpnk3O..uR", ""}, {"$4$W$.eHqh7OeyhVkBG0lCuUFnEShQq3tZt1QOLUx/9vIt3p56rUMCu2w7iQof7HwWa1pJwcBpPG.7KK3Pcce.oFX", "password"}, {"$4$dw7uRHW$17b2EzV3m0ziCLQoSKzUElTVgkL7cHXQzZzeeuNnkee/bchs0VHGqzjXrMZtWVfK2OW8.GfHvtZgzqGF.IUZ", "John the Ripper"}, {"$4$2tgCi76D$NL8CBWreQkoaVeGVL/a27ZrwYq6M8mlNt.uqc9E9.OiANu6JHdQy2r6J4uAZuD7wKqAQier1YVL7M0IF.gvi", "DragonFly BSD"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out) [(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)]; static char *cur_salt; static int salt_len; static void init(struct fmt_main *self) { saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *start; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ciphertext += FORMAT_TAG_LEN; for (pos = ciphertext; *pos && *pos != '$'; pos++); if (!*pos || pos < ciphertext || pos > &ciphertext[8]) return 0; start = ++pos; while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++; if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0; return 1; } #define TO_BINARY(b1, b2, b3) \ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ 
((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out[b1] = value >> 16; \ out[b2] = value >> 8; \ out[b3] = value; // Don't copy this code without realising it mimics bugs in the original code! // We are actually missing the last 16 bits with this implementation. static void *get_binary(char *ciphertext) { static uint32_t outbuf[BINARY_SIZE/4]; uint32_t value; char *pos; unsigned char *out = (unsigned char*)outbuf; int i; memset(outbuf, 0, sizeof(outbuf)); pos = strrchr(ciphertext, '$') + 1; for (i = 0; i < 20; i++) { TO_BINARY(i, i + 21, i + 42); } value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); out[20] = value >> 16; out[41] = value >> 8; return (void *)out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; { SHA512_CTX ctx; SHA512_Init(&ctx); /* First the password */ SHA512_Update(&ctx, saved_key[index], saved_len[index]); /* Then the salt, including the $4$ magic */ SHA512_Update(&ctx, cur_salt, salt_len); SHA512_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static void set_salt(void *salt) { salt_len = (int)*(char*)salt; cur_salt = (char*)salt + 1; } // For 32-bit version of the bug, our magic is "$4$\0" static void *get_salt_32(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_32); ciphertext += FORMAT_TAG_LEN; strcpy(&out[1], FORMAT_TAG); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[5], ciphertext, len); out[0] = len + 4; return out; } // For 64-bit version of the bug, our magic is "$4$\0/etc" static void *get_salt_64(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_64); ciphertext += FORMAT_TAG_LEN; memcpy(&out[1], "$4$\0/etc", 8); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[9], ciphertext, len); out[0] = len + 8; return out; } static int cmp_all(void *binary, int count) { int index = 0; if (!memcmp(binary, crypt_out[index], USED_BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], USED_BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash function by DJ Bernstein static int salt_hash(void *salt) { unsigned char *s = (unsigned char*)salt + 1; unsigned int hash = 5381; unsigned int i; for (i = 0; i < *(unsigned char*)salt; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_dragonfly4_32 = { { 
FORMAT_LABEL_32, FORMAT_NAME_32, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, USED_BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_32, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests_32 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_32, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_dragonfly4_64 = { { FORMAT_LABEL_64, FORMAT_NAME_64, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_64, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { NULL }, tests_64 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_64, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
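/* [Editor's sketch] The salt layouts built by get_salt_32() and get_salt_64()
 * above, written out stand-alone (build_salt32() is a hypothetical name).
 * Byte 0 holds the total length; it is followed by the "magic" that the buggy
 * DragonFly crypt code effectively hashed ("$4$\0" for the 32-bit bug,
 * "$4$\0/etc" for the 64-bit one) and then by up to 8 salt characters. */
#include <string.h>

static int build_salt32(unsigned char out[1 + 4 + 8],
                        const char *salt_chars, int n /* <= 8 */) {
    memset(out, 0, 1 + 4 + 8);
    memcpy(&out[1], "$4$", 4);       /* 4 bytes: includes the trailing NUL */
    memcpy(&out[5], salt_chars, n);  /* the actual salt characters */
    out[0] = (unsigned char)(n + 4); /* length byte later read by set_salt() */
    return out[0];
}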
#if FMT_EXTERNS_H extern struct fmt_main fmt_dragonfly4_32; extern struct fmt_main fmt_dragonfly4_64; #elif FMT_REGISTERS_H john_register_one(&fmt_dragonfly4_32); john_register_one(&fmt_dragonfly4_64); #else #include "sha2.h" #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #ifdef _OPENMP #ifndef OMP_SCALE #define OMP_SCALE 2048 // tuned on K8-dual HT #endif #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL_32 "dragonfly4-32" #define FORMAT_LABEL_64 "dragonfly4-64" #define FORMAT_NAME_32 "DragonFly BSD $4$ w/ bugs, 32-bit" #define FORMAT_NAME_64 "DragonFly BSD $4$ w/ bugs, 64-bit" #define FORMAT_TAG "$4$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #if ARCH_BITS >= 64 #define ALGORITHM_NAME "SHA512 64/" ARCH_BITS_STR " " SHA2_LIB #else #define ALGORITHM_NAME "SHA512 32/" ARCH_BITS_STR " " SHA2_LIB #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 84 #define BINARY_SIZE 64 #define BINARY_ALIGN 4 #define USED_BINARY_SIZE 62 // Due to base64 bug in DragonBSD crypt-sha512.c #define SALT_SIZE_32 (1+4+8) // 1st char is length #define SALT_SIZE_64 (1+8+8) #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests_32[] = { {"$4$7E48ul$K4u43llx1P184KZBoILl2hnFLBHj6.486TtxWA.EA1pLZuQS7P5k0LQqyEULux47.5vttDbSo/Cbpsez.AUI", "magnum"}, {"$4$Hz$5U1s18ntUYE24mF3JN44BYZPN34HBCMw57.Yw2JeKoiBkTVSGBDZEPT325hvR7iw8QYHy9kG7WUW8LCM.6UD", ""}, {"$4$W$79ddF.iDXVPcf/uf8bMFl15leilo1GE8C2KnEAWs3isK930rVy1EZZS2veHgU17NRt4qpKTtZRCA.QC7.68j", "password"}, {"$4$dw7uRHW$Cs6rbZqAVEEp9dsYOl4w/U84YydqdsEYyxHNvAtd2bcLz2Eem9L7FI/aGD2ayAybmprtYZLq2AtdXBio.cX0", "John the Ripper"}, {"$4$2tgCi76D$zy7ms.v1Y8HcsasTaR8n/Ng8GH4dhPv4ozihbM4JMNSJUmw7wVKbcqksefn7nVT.WrN18fV8i1yh7Gmq.cXC", "DragonFly BSD"}, {NULL} }; static struct fmt_tests tests_64[] = { {"$4$7E48ul$9or6.L/T.iChtPIGY4.vIgdYEmMkTW7Ru4OJxtGJtonCQo.wu3.bS4UPlUc2B8CAfGo1Oi5PgQvfhzNQ.A8v", "magnum"}, {"$4$Hz$Mujq0GrjuRtPhcM/0rOfbr2l9fXGfVwKAuL9oL5IH.RnOO1zcgG/S6rSIrebK4g0BEgKGKc0zmWpnk3O..uR", ""}, {"$4$W$.eHqh7OeyhVkBG0lCuUFnEShQq3tZt1QOLUx/9vIt3p56rUMCu2w7iQof7HwWa1pJwcBpPG.7KK3Pcce.oFX", "password"}, {"$4$dw7uRHW$17b2EzV3m0ziCLQoSKzUElTVgkL7cHXQzZzeeuNnkee/bchs0VHGqzjXrMZtWVfK2OW8.GfHvtZgzqGF.IUZ", "John the Ripper"}, {"$4$2tgCi76D$NL8CBWreQkoaVeGVL/a27ZrwYq6M8mlNt.uqc9E9.OiANu6JHdQy2r6J4uAZuD7wKqAQier1YVL7M0IF.gvi", "DragonFly BSD"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out) [(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)]; static char *cur_salt; static int salt_len; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *pos, *start; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ciphertext += FORMAT_TAG_LEN; for (pos = ciphertext; *pos && *pos != '$'; pos++); if (!*pos || pos < ciphertext || pos > &ciphertext[8]) return 0; start = ++pos; while (atoi64[ARCH_INDEX(*pos)] != 0x7F) 
pos++; if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0; return 1; } #define TO_BINARY(b1, b2, b3) \ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out[b1] = value >> 16; \ out[b2] = value >> 8; \ out[b3] = value; // Don't copy this code without realising it mimics bugs in the original code! // We are actually missing the last 16 bits with this implementation. static void *get_binary(char *ciphertext) { static uint32_t outbuf[BINARY_SIZE/4]; uint32_t value; char *pos; unsigned char *out = (unsigned char*)outbuf; int i; memset(outbuf, 0, sizeof(outbuf)); pos = strrchr(ciphertext, '$') + 1; for (i = 0; i < 20; i++) { TO_BINARY(i, i + 21, i + 42); } value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); out[20] = value >> 16; out[41] = value >> 8; return (void *)out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { SHA512_CTX ctx; SHA512_Init(&ctx); /* First the password */ SHA512_Update(&ctx, saved_key[index], saved_len[index]); /* Then the salt, including the $4$ magic */ SHA512_Update(&ctx, cur_salt, salt_len); SHA512_Final((unsigned char*)crypt_out[index], &ctx); } return count; } static void set_salt(void *salt) { salt_len = (int)*(char*)salt; cur_salt = (char*)salt + 1; } // For 32-bit version of the bug, our magic is "$4$\0" static void *get_salt_32(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_32); ciphertext += FORMAT_TAG_LEN; strcpy(&out[1], FORMAT_TAG); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[5], ciphertext, len); out[0] = len + 4; return out; } // For 64-bit version of the bug, our magic is "$4$\0/etc" static void *get_salt_64(char *ciphertext) { static char *out; int len; if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD); memset(out, 0, SALT_SIZE_64); ciphertext += FORMAT_TAG_LEN; memcpy(&out[1], "$4$\0/etc", 8); for (len = 0; ciphertext[len] != '$'; len++); memcpy(&out[9], ciphertext, len); out[0] = len + 8; return out; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], USED_BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], 
USED_BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash function by DJ Bernstein static int salt_hash(void *salt) { unsigned char *s = (unsigned char*)salt + 1; unsigned int hash = 5381; unsigned int i; for (i = 0; i < *(unsigned char*)salt; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_dragonfly4_32 = { { FORMAT_LABEL_32, FORMAT_NAME_32, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, USED_BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_32, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests_32 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_32, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_dragonfly4_64 = { { FORMAT_LABEL_64, FORMAT_NAME_64, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE_64, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { NULL }, tests_64 }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt_64, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
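/* [Editor's sketch] The crypt_all() above uses the usual John the Ripper
 * OpenMP idiom: one independent hash per candidate, with the SHA512_CTX
 * declared inside the loop body so each thread gets a private context. A
 * minimal stand-alone rendering of that idiom; hash_candidate() is a
 * hypothetical stand-in for the SHA512_Init/Update/Final sequence. */
#ifdef _OPENMP
#include <omp.h>
#endif

void hash_candidate(int index); /* hypothetical: writes only crypt_out[index] */

void hash_all_candidates(int count) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int index = 0; index < count; index++)
        hash_candidate(index); /* disjoint outputs => no data races */
}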
Interp1PrimFifthOrderCRWENOChar.c
/*! @file Interp1PrimFifthOrderCRWENOChar.c @author Debojyoti Ghosh @brief Characteristic-based CRWENO5 Scheme */ #include <stdio.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <tridiagLU.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 /*! @brief 5th order CRWENO reconstruction (characteristic-based) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the fifth order CRWENO scheme on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order CRWENO numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three 3rd order methods: \f{align}{ &\ \omega_1\ \times\ \left[ \frac{2}{3}\hat{\alpha}^k_{j-1/2} + \frac{1}{3}\hat{\alpha}^k_{j+1/2} = \frac{1}{6} \left( f_{j-1} + 5f_j \right) \right]\\ + &\ \omega_2\ \times\ \left[ \frac{1}{3}\hat{\alpha}^k_{j-1/2}+\frac{2}{3}\hat{\alpha}^k_{j+1/2} = \frac{1}{6} \left( 5f_j + f_{j+1} \right) \right] \\ + &\ \omega_3\ \times\ \left[ \frac{2}{3}\hat{\alpha}^k_{j+1/2} + \frac{1}{3}\hat{\alpha}^k_{j+3/2} = \frac{1}{6} \left( f_j + 5f_{j+1} \right) \right] \\ \Rightarrow &\ \left(\frac{2}{3}\omega_1+\frac{1}{3}\omega_2\right)\hat{\alpha}^k_{j-1/2} + \left[\frac{1}{3}\omega_1+\frac{2}{3}(\omega_2+\omega_3)\right]\hat{\alpha}^k_{j+1/2} + \frac{1}{3}\omega_3\hat{\alpha}^k_{j+3/2} = \frac{\omega_1}{6}{\alpha}^k_{j-1} + \frac{5(\omega_1+\omega_2)+\omega_3}{6}{\alpha}^k_j + \frac{\omega_2+5\omega_3}{6}{\alpha}^k_{j+1}, \f} where \f{equation}{ \alpha^k = {\bf l}_k \cdot {\bf f},\ k=1,\cdots,n \f} is the \f$k\f$-th characteristic quantity, and \f${\bf l}_k\f$ is the \f$k\f$-th left eigenvector, \f${\bf r}_k\f$ is the \f$k\f$-th right eigenvector, and \f$n\f$ is #HyPar::nvars. The nonlinear weights \f$\omega_k; k=1,2,3\f$ are the WENO weights computed in WENOFifthOrderCalculateWeightsChar(). The resulting block tridiagonal system is solved using blocktridiagLU() (see also #TridiagLU, tridiagLU.h). The final interpolated function is computed from the interpolated characteristic quantities as: \f{equation}{ \hat{\bf f}_{j+1/2} = \sum_{k=1}^n \alpha^k_{j+1/2} {\bf r}_k \f} \b Implementation \b Notes: + This method assumes a uniform grid in the spatial dimension corresponding to the interpolation. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. + The left and right eigenvectors are computed at an averaged quantity at j+1/2. Thus, this function requires functions to compute the average state, and the left and right eigenvectors. 
These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::GetRightEigenvectors() - #HyPar::AveragingFunction() If these functions are not provided by the physical model, then a characteristic-based interpolation cannot be used. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. + Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). \b Reference: + Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with Weighted ENO Limiting for Hyperbolic Conservation Laws, SIAM Journal on Scientific Computing, 34 (3), 2012, A1678–A1706, http://dx.doi.org/10.1137/110857659 + Ghosh, D., Constantinescu, E. 
M., Brown, J., Efficient Implementation of Nonlinear Compact Schemes on Massively Parallel Platforms, SIAM Journal on Scientific Computing, 37 (3), 2015, C354–C383, http://dx.doi.org/10.1137/140989261 */ int Interp1PrimFifthOrderCRWENOChar( double *fI, /*!< Array of interpolated function values at the interfaces */ double *fC, /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */ double *u, /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */ double *x, /*!< Grid coordinates */ int upw, /*!< Upwind direction (left or right biased) */ int dir, /*!< Spatial dimension along which to interpolation */ void *s, /*!< Object of type #HyPar containing solver-related variables */ void *m, /*!< Object of type #MPIVariables containing MPI-related variables */ int uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e, if the solution is being reconstructed */ ) { HyPar *solver = (HyPar*) s; MPIVariables *mpi = (MPIVariables*) m; CompactScheme *compact= (CompactScheme*) solver->compact; WENOParameters *weno = (WENOParameters*) solver->interp; TridiagLU *lu = (TridiagLU*) solver->lusolver; int sys,Nsys,d,v,k; _DECLARE_IERR_; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; /* define some constants */ static const double one_third = 1.0/3.0; static const double one_sixth = 1.0/6.0; double *ww1, *ww2, *ww3; ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1; /* calculate total number of block tridiagonal systems to solve */ _ArrayProduct1D_(bounds_outer,ndims,Nsys); /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */ double R[nvars*nvars], L[nvars*nvars], uavg[nvars]; /* Allocate arrays for tridiagonal system */ double *A = compact->A; double *B = compact->B; double *C = compact->C; double *F = compact->R; #pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI) for (sys=0; sys<Nsys; sys++) { _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexC,ndims); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int qm1,qm2,qm3,qp1,qp2; if (upw > 0) { indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3); indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1); indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2); } else { indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3); indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2); indexC[dir] = indexI[dir] ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1); indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1); indexC[dir] = indexI[dir]-2; 
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2); } int p; /* 1D index of the interface */ _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); /* find averaged state at this interface */ IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr); /* Get the left and right eigenvectors */ IERR solver->GetLeftEigenvectors (uavg,L,solver->physics,dir); CHECKERR(ierr); IERR solver->GetRightEigenvectors (uavg,R,solver->physics,dir); CHECKERR(ierr); for (v=0; v<nvars; v++) { /* calculate the characteristic flux components along this characteristic */ double fm3, fm2, fm1, fp1, fp2; fm3 = fm2 = fm1 = fp1 = fp2 = 0; for (k = 0; k < nvars; k++) { fm3 += L[v*nvars+k] * fC[qm3*nvars+k]; fm2 += L[v*nvars+k] * fC[qm2*nvars+k]; fm1 += L[v*nvars+k] * fC[qm1*nvars+k]; fp1 += L[v*nvars+k] * fC[qp1*nvars+k]; fp2 += L[v*nvars+k] * fC[qp2*nvars+k]; } /* Candidate stencils and their optimal weights*/ double f1, f2, f3; if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ f1 = (2*one_sixth)*fm3 - (7.0*one_sixth)*fm2 + (11.0*one_sixth)*fm1; f2 = (-one_sixth)*fm2 + (5.0*one_sixth)*fm1 + (2*one_sixth)*fp1; f3 = (2*one_sixth)*fm1 + (5*one_sixth)*fp1 - (one_sixth)*fp2; } else { /* CRWENO5 at the interior points */ f1 = (one_sixth) * (fm2 + 5*fm1); f2 = (one_sixth) * (5*fm1 + fp1); f3 = (one_sixth) * (fm1 + 5*fp1); } /* calculate WENO weights */ double w1,w2,w3; w1 = *(ww1+p*nvars+v); w2 = *(ww2+p*nvars+v); w3 = *(ww3+p*nvars+v); if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { for (k=0; k<nvars; k++) { A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0; C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0; B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = L[v*nvars+k]; } } else { if (upw > 0) { for (k=0; k<nvars; k++) { A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((2*one_third)*w1 + (one_third)*w2) * L[v*nvars+k]; B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w1 + (2*one_third)*(w2+w3)) * L[v*nvars+k]; C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w3) * L[v*nvars+k]; } } else { for (k=0; k<nvars; k++) { C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((2*one_third)*w1 + (one_third)*w2) * L[v*nvars+k]; B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w1 + (2*one_third)*(w2+w3)) * L[v*nvars+k]; A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w3) * L[v*nvars+k]; } } } F[(Nsys*indexI[dir]+sys)*nvars+v] = w1*f1 + w2*f2 + w3*f3; } } } #ifdef serial /* Solve the tridiagonal system */ IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,NULL); CHECKERR(ierr); #else /* Solve the tridiagonal system */ /* all processes except the last will solve without the last interface to avoid overlap */ if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR blocktridiagLU(A,B,C,F,dim[dir] ,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr); } else { IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr); } /* Now get the solution to the last interface from the next proc */ double *sendbuf = compact->sendbuf; double *recvbuf = compact->recvbuf; MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL}; if (mpi->ip[dir]) for (d=0; d<Nsys*nvars; d++) sendbuf[d] = F[d]; if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]); if (mpi->ip[dir]) 
MPI_Isend(sendbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]); MPI_Waitall(2,&req[0],MPI_STATUS_IGNORE); if (mpi->ip[dir] != mpi->iproc[dir]-1) for (d=0; d<Nsys*nvars; d++) F[d+Nsys*nvars*dim[dir]] = recvbuf[d]; #endif /* save the solution to fI */ #pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI) for (sys=0; sys<Nsys; sys++) { _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0); _ArrayCopy1D_(index_outer,indexI,ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) { int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); _ArrayCopy1D_((F+sys*nvars+Nsys*nvars*indexI[dir]),(fI+nvars*p),nvars); } } return(0); }
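The rows assembled into A, B, C and the right-hand side F above form a block tridiagonal system that is handed to blocktridiagLU() (see tridiagLU.h). For orientation, here is a minimal serial sketch of the scalar analogue, the textbook Thomas algorithm; blocktridiagLU() generalizes this to nvars x nvars blocks and, in the MPI build, to systems distributed across ranks. The sketch overwrites b and r in place:

#include <stdio.h>

/* Solves a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = r[i], i = 0..n-1,
 * with a[0] and c[n-1] unused; b and r are overwritten. */
static void thomas_solve(double *a, double *b, double *c,
                         double *r, double *x, int n)
{
    int i;
    for (i = 1; i < n; i++) {            /* forward elimination */
        double m = a[i] / b[i-1];
        b[i] -= m * c[i-1];
        r[i] -= m * r[i-1];
    }
    x[n-1] = r[n-1] / b[n-1];            /* back substitution */
    for (i = n - 2; i >= 0; i--)
        x[i] = (r[i] - c[i] * x[i+1]) / b[i];
}

int main(void)
{
    /* 1D Laplacian rows (-1, 2, -1); the exact solution is {1, 2, 3} */
    double a[3] = {0, -1, -1}, b[3] = {2, 2, 2}, c[3] = {-1, -1, 0};
    double r[3] = {0, 0, 4}, x[3];
    thomas_solve(a, b, c, r, x, 3);
    printf("%g %g %g\n", x[0], x[1], x[2]);
    return 0;
}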
/* * ! @file Interp1PrimFifthOrderCRWENOChar.c @author Debojyoti Ghosh @brief * Characteristic-based CRWENO5 Scheme */ #include <stdio.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <tridiagLU.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /* * ! \def _MINIMUM_GHOSTS_ Minimum number of ghost points required for this * interpolation method. */ #define _MINIMUM_GHOSTS_ 3 /* * ! @brief 5th order CRWENO reconstruction (characteristic-based) on a * uniform grid * * Computes the interpolated values of the first primitive of a function \f${\bf * f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values * of the function using the fifth order CRWENO scheme on a uniform grid. The * first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ * that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = * \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf * u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial * coordinate along the dimension of the interpolation. This function * computes the 5th order CRWENO numerical approximation \f$\hat{\bf * f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three * 3rd order methods: \f{align}{ &\ \omega_1\ \times\ \left[ * \frac{2}{3}\hat{\alpha}^k_{j-1/2} + \frac{1}{3}\hat{\alpha}^k_{j+1/2} = * \frac{1}{6} \left( f_{j-1} + 5f_j \right) \right]\\ + &\ \omega_2\ \times\ * \left[ \frac{1}{3}\hat{\alpha}^k_{j-1/2}+\frac{2}{3}\hat{\alpha}^k_{j+1/2} * = \frac{1}{6} \left( 5f_j + f_{j+1} \right) \right] \\ + &\ \omega_3\ * \times\ \left[ \frac{2}{3}\hat{\alpha}^k_{j+1/2} + * \frac{1}{3}\hat{\alpha}^k_{j+3/2} = \frac{1}{6} \left( f_j + 5f_{j+1} * \right) \right] \\ \Rightarrow &\ * \left(\frac{2}{3}\omega_1+\frac{1}{3}\omega_2\right)\hat{\alpha}^k_{j-1/2} * + * \left[\frac{1}{3}\omega_1+\frac{2}{3}(\omega_2+\omega_3)\right]\hat{\alpha} * ^k_{j+1/2} + \frac{1}{3}\omega_3\hat{\alpha}^k_{j+3/2} = * \frac{\omega_1}{6}{\alpha}^k_{j-1} + * \frac{5(\omega_1+\omega_2)+\omega_3}{6}{\alpha}^k_j + * \frac{\omega_2+5\omega_3}{6}{\alpha}^k_{j+1}, \f} where \f{equation}{ * \alpha^k = {\bf l}_k \cdot {\bf f},\ k=1,\cdots,n \f} is the \f$k\f$-th * characteristic quantity, and \f${\bf l}_k\f$ is the \f$k\f$-th left * eigenvector, \f${\bf r}_k\f$ is the \f$k\f$-th right eigenvector, and * \f$n\f$ is #HyPar::nvars. The nonlinear weights \f$\omega_k; k=1,2,3\f$ * are the WENO weights computed in WENOFifthOrderCalculateWeightsChar(). The * resulting block tridiagonal system is solved using blocktridiagLU() (see * also #TridiagLU, tridiagLU.h). The final interpolated function is computed * from the interpolated characteristic quantities as: \f{equation}{ \hat{\bf * f}_{j+1/2} = \sum_{k=1}^n \alpha^k_{j+1/2} {\bf r}_k \f} * * \b Implementation \b Notes: + This method assumes a uniform grid in the * spatial dimension corresponding to the interpolation. + The method * described above corresponds to a left-biased interpolation. The * corresponding right-biased interpolation can be obtained by reflecting the * equations about interface j+1/2. + The left and right eigenvectors are * computed at an averaged quantity at j+1/2. Thus, this function requires * functions to compute the average state, and the left and right * eigenvectors. 
These are provided by the physical model through - * #HyPar::GetLeftEigenvectors() - #HyPar::GetRightEigenvectors() - * #HyPar::AveragingFunction() * * If these functions are not provided by the physical model, then a * characteristic-based interpolation cannot be used. + The function computes * the interpolant for the entire grid in one call. It loops over all the * grid lines along the interpolation direction and carries out the 1D * interpolation along these grid lines. + Location of cell-centers and cell * interfaces along the spatial dimension of the interpolation is shown in * the following figure: @image html chap1_1Ddomain.png @image latex * chap1_1Ddomain.eps width=0.9\textwidth * * \b Function \b arguments: * * Argument | Type | Explanation --------- | --------- | * --------------------------------------------- fI | double* | * Array to hold the computed interpolant at the grid interfaces. This array * must have the same layout as the solution, but with \b no \b ghost \b * points. Its size should be the same as u in all dimensions, except dir * (the dimension along which to interpolate) along which it should be larger * by 1 (number of interfaces is 1 more than the number of interior cell * centers). fC | double* | Array with the cell-centered values of * the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have * the same layout and size as the solution, \b with \b ghost \b points. u * | double* | The solution array \f${\bf u}\f$ (with ghost points). If the * interpolation is characteristic based, this is needed to compute the * eigendecomposition. For a multidimensional problem, the layout is as * follows: u is a contiguous 1D array of size * (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional * solution, with the following ordering - nvars, dim[0], dim[1], ..., * dim[D-1], where nvars is the number of solution components * (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the * number of spatial dimensions. x | double* | The grid array (with * ghost points). This is used only by non-uniform-grid interpolation * methods. For multidimensional problems, the layout is as follows: x is a * contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial * coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial * coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so * forth. upw | int | Upwinding direction: if positive, a * left-biased interpolant will be computed; if negative, a right-biased * interpolant will be computed. If the interpolation method is central, then * this has no effect. dir | int | Spatial dimension along which * to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | * void* | Solver object of type #HyPar: the following variables are * needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. * m | void* | MPI object of type #MPIVariables: this is needed * only by compact interpolation method that need to solve a global implicit * system across MPI ranks. uflag | int | A flag indicating if the * function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf * u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). * * * \b Reference: + Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with * Weighted ENO Limiting for Hyperbolic Conservation Laws, SIAM Journal on * Scientific Computing, 34 (3), 2012, A1678–A1706, * http://dx.doi.org/10.1137/110857659 + Ghosh, D., Constantinescu, E. 
M., * Brown, J., Efficient Implementation of Nonlinear Compact Schemes on * Massively Parallel Platforms, SIAM Journal on Scientific Computing, 37 * (3), 2015, C354–C383, http://dx.doi.org/10.1137/140989261 */ int Interp1PrimFifthOrderCRWENOChar( double *fI, /* !< Array of interpolated * function values at the * interfaces */ double *fC, /* !< Array of cell-centered * values of the function * \f${\bf f}\left({\bf * u}\right)\f$ */ double *u, /* !< Array of cell-centered * values of the solution * \f${\bf u}\f$ */ double *x, /* !< Grid coordinates */ int upw, /* !< Upwind direction (left * or right biased) */ int dir, /* !< Spatial dimension along * which to interpolation */ void *s, /* !< Object of type #HyPar * containing solver-related * variables */ void *m, /* !< Object of type * #MPIVariables containing * MPI-related variables */ int uflag /* !< Flag to indicate if * \f$f(u) \equiv u\f$, i.e, * if the solution is being * reconstructed */ ) { HyPar *solver = (HyPar *) s; MPIVariables *mpi = (MPIVariables *) m; CompactScheme *compact = (CompactScheme *) solver->compact; WENOParameters *weno = (WENOParameters *) solver->interp; TridiagLU *lu = (TridiagLU *) solver->lusolver; int sys, Nsys, d, v, k; _DECLARE_IERR_; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; /* define some constants */ static const double one_third = 1.0 / 3.0; static const double one_sixth = 1.0 / 6.0; double *ww1, *ww2, *ww3; ww1 = weno->w1 + (upw < 0 ? 2 * weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww2 = weno->w2 + (upw < 0 ? 2 * weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww3 = weno->w3 + (upw < 0 ? 2 * weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; /* * create index and bounds for the outer loop, i.e., to loop over all 1D * lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim, bounds_outer, ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim, bounds_inter, ndims); bounds_inter[dir] += 1; /* calculate total number of block tridiagonal systems to solve */ _ArrayProduct1D_(bounds_outer, ndims, Nsys); /* * allocate arrays for the averaged state, eigenvectors and * characteristic interpolated f */ double R[nvars * nvars], L[nvars * nvars], uavg[nvars]; /* Allocate arrays for tridiagonal system */ double *A = compact->A; double *B = compact->B; double *C = compact->C; double *F = compact->R; for (sys = 0; sys < Nsys; sys++) { _ArrayIndexnD_(ndims, sys, bounds_outer, index_outer, 0); _ArrayCopy1D_(index_outer, indexC, ndims); _ArrayCopy1D_(index_outer, indexI, ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir] + 1; indexI[dir]++) { int qm1, qm2, qm3, qp1, qp2; if (upw > 0) { indexC[dir] = indexI[dir] - 3; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm3); indexC[dir] = indexI[dir] - 2; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm2); indexC[dir] = indexI[dir] - 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm1); indexC[dir] = indexI[dir]; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp1); indexC[dir] = indexI[dir] + 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp2); } else { indexC[dir] = indexI[dir] + 2; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm3); indexC[dir] = indexI[dir] + 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm2); indexC[dir] = indexI[dir]; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm1); indexC[dir] = indexI[dir] - 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp1); indexC[dir] = indexI[dir] - 
2; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp2); } int p; /* 1D index of the interface */ _ArrayIndex1D_(ndims, bounds_inter, indexI, 0, p); /* find averaged state at this interface */ IERR solver->AveragingFunction(uavg, &u[nvars * qm1], &u[nvars * qp1], solver->physics); CHECKERR(ierr); /* Get the left and right eigenvectors */ IERR solver->GetLeftEigenvectors(uavg, L, solver->physics, dir); CHECKERR(ierr); IERR solver->GetRightEigenvectors(uavg, R, solver->physics, dir); CHECKERR(ierr); for (v = 0; v < nvars; v++) { /* * calculate the characteristic flux components along this * characteristic */ double fm3, fm2, fm1, fp1, fp2; fm3 = fm2 = fm1 = fp1 = fp2 = 0; for (k = 0; k < nvars; k++) { fm3 += L[v * nvars + k] * fC[qm3 * nvars + k]; fm2 += L[v * nvars + k] * fC[qm2 * nvars + k]; fm1 += L[v * nvars + k] * fC[qm1 * nvars + k]; fp1 += L[v * nvars + k] * fC[qp1 * nvars + k]; fp2 += L[v * nvars + k] * fC[qp2 * nvars + k]; } /* Candidate stencils and their optimal weights */ double f1, f2, f3; if (((mpi->ip[dir] == 0) && (indexI[dir] == 0)) || ((mpi->ip[dir] == mpi->iproc[dir] - 1) && (indexI[dir] == dim[dir]))) { /* Use WENO5 at the physical boundaries */ f1 = (2 * one_sixth) * fm3 - (7.0 * one_sixth) * fm2 + (11.0 * one_sixth) * fm1; f2 = (-one_sixth) * fm2 + (5.0 * one_sixth) * fm1 + (2 * one_sixth) * fp1; f3 = (2 * one_sixth) * fm1 + (5 * one_sixth) * fp1 - (one_sixth) * fp2; } else { /* CRWENO5 at the interior points */ f1 = (one_sixth) * (fm2 + 5 * fm1); f2 = (one_sixth) * (5 * fm1 + fp1); f3 = (one_sixth) * (fm1 + 5 * fp1); } /* calculate WENO weights */ double w1, w2, w3; w1 = *(ww1 + p * nvars + v); w2 = *(ww2 + p * nvars + v); w3 = *(ww3 + p * nvars + v); if (((mpi->ip[dir] == 0) && (indexI[dir] == 0)) || ((mpi->ip[dir] == mpi->iproc[dir] - 1) && (indexI[dir] == dim[dir]))) { for (k = 0; k < nvars; k++) { A[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = 0.0; C[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = 0.0; B[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = L[v * nvars + k]; } } else { if (upw > 0) { for (k = 0; k < nvars; k++) { A[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((2 * one_third) * w1 + (one_third) * w2) * L[v * nvars + k]; B[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w1 + (2 * one_third) * (w2 + w3)) * L[v * nvars + k]; C[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w3) * L[v * nvars + k]; } } else { for (k = 0; k < nvars; k++) { C[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((2 * one_third) * w1 + (one_third) * w2) * L[v * nvars + k]; B[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w1 + (2 * one_third) * (w2 + w3)) * L[v * nvars + k]; A[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w3) * L[v * nvars + k]; } } } F[(Nsys * indexI[dir] + sys) * nvars + v] = w1 * f1 + w2 * f2 + w3 * f3; } } } #ifdef serial /* Solve the tridiagonal system */ IERR blocktridiagLU(A, B, C, F, dim[dir] + 1, Nsys, nvars, lu, NULL); CHECKERR(ierr); #else /* Solve the tridiagonal system */ /* * all processes except the last will solve without the last interface to * avoid overlap */ if (mpi->ip[dir] != mpi->iproc[dir] - 1) { IERR blocktridiagLU(A, B, C, F, dim[dir], Nsys, nvars, lu, &mpi->comm[dir]); CHECKERR(ierr); } else { IERR blocktridiagLU(A, B, C, F, dim[dir] + 1, Nsys, nvars, lu, &mpi->comm[dir]); CHECKERR(ierr); } /* Now get the solution to the last interface from the next proc 
*/ double *sendbuf = compact->sendbuf; double *recvbuf = compact->recvbuf; MPI_Request req[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL}; if (mpi->ip[dir]) for (d = 0; d < Nsys * nvars; d++) sendbuf[d] = F[d]; if (mpi->ip[dir] != mpi->iproc[dir] - 1) MPI_Irecv(recvbuf, Nsys * nvars, MPI_DOUBLE, mpi->ip[dir] + 1, 214, mpi->comm[dir], &req[0]); if (mpi->ip[dir]) MPI_Isend(sendbuf, Nsys * nvars, MPI_DOUBLE, mpi->ip[dir] - 1, 214, mpi->comm[dir], &req[1]); MPI_Waitall(2, &req[0], MPI_STATUS_IGNORE); if (mpi->ip[dir] != mpi->iproc[dir] - 1) for (d = 0; d < Nsys * nvars; d++) F[d + Nsys * nvars * dim[dir]] = recvbuf[d]; #endif /* save the solution to fI */ for (sys = 0; sys < Nsys; sys++) { _ArrayIndexnD_(ndims, sys, bounds_outer, index_outer, 0); _ArrayCopy1D_(index_outer, indexI, ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir] + 1; indexI[dir]++) { int p; _ArrayIndex1D_(ndims, bounds_inter, indexI, 0, p); _ArrayCopy1D_((F + sys * nvars + Nsys * nvars * indexI[dir]), (fI + nvars * p), nvars); } } return (0); }
/* * ! @file Interp1PrimFifthOrderCRWENOChar.c @author Debojyoti Ghosh @brief * Characteristic-based CRWENO5 Scheme */ #include <stdio.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <tridiagLU.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /* * ! \def _MINIMUM_GHOSTS_ Minimum number of ghost points required for this * interpolation method. */ #define _MINIMUM_GHOSTS_ 3 /* * ! @brief 5th order CRWENO reconstruction (characteristic-based) on a * uniform grid * * Computes the interpolated values of the first primitive of a function \f${\bf * f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values * of the function using the fifth order CRWENO scheme on a uniform grid. The * first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ * that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = * \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf * u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial * coordinate along the dimension of the interpolation. This function * computes the 5th order CRWENO numerical approximation \f$\hat{\bf * f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three * 3rd order methods: \f{align}{ &\ \omega_1\ \times\ \left[ * \frac{2}{3}\hat{\alpha}^k_{j-1/2} + \frac{1}{3}\hat{\alpha}^k_{j+1/2} = * \frac{1}{6} \left( f_{j-1} + 5f_j \right) \right]\\ + &\ \omega_2\ \times\ * \left[ \frac{1}{3}\hat{\alpha}^k_{j-1/2}+\frac{2}{3}\hat{\alpha}^k_{j+1/2} * = \frac{1}{6} \left( 5f_j + f_{j+1} \right) \right] \\ + &\ \omega_3\ * \times\ \left[ \frac{2}{3}\hat{\alpha}^k_{j+1/2} + * \frac{1}{3}\hat{\alpha}^k_{j+3/2} = \frac{1}{6} \left( f_j + 5f_{j+1} * \right) \right] \\ \Rightarrow &\ * \left(\frac{2}{3}\omega_1+\frac{1}{3}\omega_2\right)\hat{\alpha}^k_{j-1/2} * + * \left[\frac{1}{3}\omega_1+\frac{2}{3}(\omega_2+\omega_3)\right]\hat{\alpha} * ^k_{j+1/2} + \frac{1}{3}\omega_3\hat{\alpha}^k_{j+3/2} = * \frac{\omega_1}{6}{\alpha}^k_{j-1} + * \frac{5(\omega_1+\omega_2)+\omega_3}{6}{\alpha}^k_j + * \frac{\omega_2+5\omega_3}{6}{\alpha}^k_{j+1}, \f} where \f{equation}{ * \alpha^k = {\bf l}_k \cdot {\bf f},\ k=1,\cdots,n \f} is the \f$k\f$-th * characteristic quantity, and \f${\bf l}_k\f$ is the \f$k\f$-th left * eigenvector, \f${\bf r}_k\f$ is the \f$k\f$-th right eigenvector, and * \f$n\f$ is #HyPar::nvars. The nonlinear weights \f$\omega_k; k=1,2,3\f$ * are the WENO weights computed in WENOFifthOrderCalculateWeightsChar(). The * resulting block tridiagonal system is solved using blocktridiagLU() (see * also #TridiagLU, tridiagLU.h). The final interpolated function is computed * from the interpolated characteristic quantities as: \f{equation}{ \hat{\bf * f}_{j+1/2} = \sum_{k=1}^n \alpha^k_{j+1/2} {\bf r}_k \f} * * \b Implementation \b Notes: + This method assumes a uniform grid in the * spatial dimension corresponding to the interpolation. + The method * described above corresponds to a left-biased interpolation. The * corresponding right-biased interpolation can be obtained by reflecting the * equations about interface j+1/2. + The left and right eigenvectors are * computed at an averaged quantity at j+1/2. Thus, this function requires * functions to compute the average state, and the left and right * eigenvectors. 
These are provided by the physical model through - * #HyPar::GetLeftEigenvectors() - #HyPar::GetRightEigenvectors() - * #HyPar::AveragingFunction() * * If these functions are not provided by the physical model, then a * characteristic-based interpolation cannot be used. + The function computes * the interpolant for the entire grid in one call. It loops over all the * grid lines along the interpolation direction and carries out the 1D * interpolation along these grid lines. + Location of cell-centers and cell * interfaces along the spatial dimension of the interpolation is shown in * the following figure: @image html chap1_1Ddomain.png @image latex * chap1_1Ddomain.eps width=0.9\textwidth * * \b Function \b arguments: * * Argument | Type | Explanation --------- | --------- | * --------------------------------------------- fI | double* | * Array to hold the computed interpolant at the grid interfaces. This array * must have the same layout as the solution, but with \b no \b ghost \b * points. Its size should be the same as u in all dimensions, except dir * (the dimension along which to interpolate) along which it should be larger * by 1 (number of interfaces is 1 more than the number of interior cell * centers). fC | double* | Array with the cell-centered values of * the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have * the same layout and size as the solution, \b with \b ghost \b points. u * | double* | The solution array \f${\bf u}\f$ (with ghost points). If the * interpolation is characteristic based, this is needed to compute the * eigendecomposition. For a multidimensional problem, the layout is as * follows: u is a contiguous 1D array of size * (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional * solution, with the following ordering - nvars, dim[0], dim[1], ..., * dim[D-1], where nvars is the number of solution components * (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the * number of spatial dimensions. x | double* | The grid array (with * ghost points). This is used only by non-uniform-grid interpolation * methods. For multidimensional problems, the layout is as follows: x is a * contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial * coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial * coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so * forth. upw | int | Upwinding direction: if positive, a * left-biased interpolant will be computed; if negative, a right-biased * interpolant will be computed. If the interpolation method is central, then * this has no effect. dir | int | Spatial dimension along which * to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | * void* | Solver object of type #HyPar: the following variables are * needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. * m | void* | MPI object of type #MPIVariables: this is needed * only by compact interpolation method that need to solve a global implicit * system across MPI ranks. uflag | int | A flag indicating if the * function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf * u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). * * * \b Reference: + Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with * Weighted ENO Limiting for Hyperbolic Conservation Laws, SIAM Journal on * Scientific Computing, 34 (3), 2012, A1678–A1706, * http://dx.doi.org/10.1137/110857659 + Ghosh, D., Constantinescu, E. 
M., * Brown, J., Efficient Implementation of Nonlinear Compact Schemes on * Massively Parallel Platforms, SIAM Journal on Scientific Computing, 37 * (3), 2015, C354–C383, http://dx.doi.org/10.1137/140989261 */ int Interp1PrimFifthOrderCRWENOChar( double *fI, /* !< Array of interpolated * function values at the * interfaces */ double *fC, /* !< Array of cell-centered * values of the function * \f${\bf f}\left({\bf * u}\right)\f$ */ double *u, /* !< Array of cell-centered * values of the solution * \f${\bf u}\f$ */ double *x, /* !< Grid coordinates */ int upw, /* !< Upwind direction (left * or right biased) */ int dir, /* !< Spatial dimension along * which to interpolation */ void *s, /* !< Object of type #HyPar * containing solver-related * variables */ void *m, /* !< Object of type * #MPIVariables containing * MPI-related variables */ int uflag /* !< Flag to indicate if * \f$f(u) \equiv u\f$, i.e, * if the solution is being * reconstructed */ ) { HyPar *solver = (HyPar *) s; MPIVariables *mpi = (MPIVariables *) m; CompactScheme *compact = (CompactScheme *) solver->compact; WENOParameters *weno = (WENOParameters *) solver->interp; TridiagLU *lu = (TridiagLU *) solver->lusolver; int sys, Nsys, d, v, k; _DECLARE_IERR_; int ghosts = solver->ghosts; int ndims = solver->ndims; int nvars = solver->nvars; int *dim = solver->dim_local; /* define some constants */ static const double one_third = 1.0 / 3.0; static const double one_sixth = 1.0 / 6.0; double *ww1, *ww2, *ww3; ww1 = weno->w1 + (upw < 0 ? 2 * weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww2 = weno->w2 + (upw < 0 ? 2 * weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; ww3 = weno->w3 + (upw < 0 ? 2 * weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir]; /* * create index and bounds for the outer loop, i.e., to loop over all 1D * lines along dimension "dir" */ int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims]; _ArrayCopy1D_(dim, bounds_outer, ndims); bounds_outer[dir] = 1; _ArrayCopy1D_(dim, bounds_inter, ndims); bounds_inter[dir] += 1; /* calculate total number of block tridiagonal systems to solve */ _ArrayProduct1D_(bounds_outer, ndims, Nsys); /* * allocate arrays for the averaged state, eigenvectors and * characteristic interpolated f */ double R[nvars * nvars], L[nvars * nvars], uavg[nvars]; /* Allocate arrays for tridiagonal system */ double *A = compact->A; double *B = compact->B; double *C = compact->C; double *F = compact->R; #pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI) for (sys = 0; sys < Nsys; sys++) { _ArrayIndexnD_(ndims, sys, bounds_outer, index_outer, 0); _ArrayCopy1D_(index_outer, indexC, ndims); _ArrayCopy1D_(index_outer, indexI, ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir] + 1; indexI[dir]++) { int qm1, qm2, qm3, qp1, qp2; if (upw > 0) { indexC[dir] = indexI[dir] - 3; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm3); indexC[dir] = indexI[dir] - 2; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm2); indexC[dir] = indexI[dir] - 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm1); indexC[dir] = indexI[dir]; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp1); indexC[dir] = indexI[dir] + 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp2); } else { indexC[dir] = indexI[dir] + 2; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm3); indexC[dir] = indexI[dir] + 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qm2); indexC[dir] = indexI[dir]; _ArrayIndex1D_(ndims, dim, indexC, ghosts, 
qm1); indexC[dir] = indexI[dir] - 1; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp1); indexC[dir] = indexI[dir] - 2; _ArrayIndex1D_(ndims, dim, indexC, ghosts, qp2); } int p; /* 1D index of the interface */ _ArrayIndex1D_(ndims, bounds_inter, indexI, 0, p); /* find averaged state at this interface */ IERR solver->AveragingFunction(uavg, &u[nvars * qm1], &u[nvars * qp1], solver->physics); CHECKERR(ierr); /* Get the left and right eigenvectors */ IERR solver->GetLeftEigenvectors(uavg, L, solver->physics, dir); CHECKERR(ierr); IERR solver->GetRightEigenvectors(uavg, R, solver->physics, dir); CHECKERR(ierr); for (v = 0; v < nvars; v++) { /* * calculate the characteristic flux components along this * characteristic */ double fm3, fm2, fm1, fp1, fp2; fm3 = fm2 = fm1 = fp1 = fp2 = 0; for (k = 0; k < nvars; k++) { fm3 += L[v * nvars + k] * fC[qm3 * nvars + k]; fm2 += L[v * nvars + k] * fC[qm2 * nvars + k]; fm1 += L[v * nvars + k] * fC[qm1 * nvars + k]; fp1 += L[v * nvars + k] * fC[qp1 * nvars + k]; fp2 += L[v * nvars + k] * fC[qp2 * nvars + k]; } /* Candidate stencils and their optimal weights */ double f1, f2, f3; if (((mpi->ip[dir] == 0) && (indexI[dir] == 0)) || ((mpi->ip[dir] == mpi->iproc[dir] - 1) && (indexI[dir] == dim[dir]))) { /* Use WENO5 at the physical boundaries */ f1 = (2 * one_sixth) * fm3 - (7.0 * one_sixth) * fm2 + (11.0 * one_sixth) * fm1; f2 = (-one_sixth) * fm2 + (5.0 * one_sixth) * fm1 + (2 * one_sixth) * fp1; f3 = (2 * one_sixth) * fm1 + (5 * one_sixth) * fp1 - (one_sixth) * fp2; } else { /* CRWENO5 at the interior points */ f1 = (one_sixth) * (fm2 + 5 * fm1); f2 = (one_sixth) * (5 * fm1 + fp1); f3 = (one_sixth) * (fm1 + 5 * fp1); } /* calculate WENO weights */ double w1, w2, w3; w1 = *(ww1 + p * nvars + v); w2 = *(ww2 + p * nvars + v); w3 = *(ww3 + p * nvars + v); if (((mpi->ip[dir] == 0) && (indexI[dir] == 0)) || ((mpi->ip[dir] == mpi->iproc[dir] - 1) && (indexI[dir] == dim[dir]))) { for (k = 0; k < nvars; k++) { A[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = 0.0; C[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = 0.0; B[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = L[v * nvars + k]; } } else { if (upw > 0) { for (k = 0; k < nvars; k++) { A[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((2 * one_third) * w1 + (one_third) * w2) * L[v * nvars + k]; B[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w1 + (2 * one_third) * (w2 + w3)) * L[v * nvars + k]; C[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w3) * L[v * nvars + k]; } } else { for (k = 0; k < nvars; k++) { C[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((2 * one_third) * w1 + (one_third) * w2) * L[v * nvars + k]; B[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w1 + (2 * one_third) * (w2 + w3)) * L[v * nvars + k]; A[(Nsys * indexI[dir] + sys) * nvars * nvars + v * nvars + k] = ((one_third) * w3) * L[v * nvars + k]; } } } F[(Nsys * indexI[dir] + sys) * nvars + v] = w1 * f1 + w2 * f2 + w3 * f3; } } } #ifdef serial /* Solve the tridiagonal system */ IERR blocktridiagLU(A, B, C, F, dim[dir] + 1, Nsys, nvars, lu, NULL); CHECKERR(ierr); #else /* Solve the tridiagonal system */ /* * all processes except the last will solve without the last interface to * avoid overlap */ if (mpi->ip[dir] != mpi->iproc[dir] - 1) { IERR blocktridiagLU(A, B, C, F, dim[dir], Nsys, nvars, lu, &mpi->comm[dir]); CHECKERR(ierr); } else { IERR blocktridiagLU(A, B, C, F, dim[dir] + 1, 
Nsys, nvars, lu, &mpi->comm[dir]); CHECKERR(ierr); } /* Now get the solution to the last interface from the next proc */ double *sendbuf = compact->sendbuf; double *recvbuf = compact->recvbuf; MPI_Request req[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL}; if (mpi->ip[dir]) for (d = 0; d < Nsys * nvars; d++) sendbuf[d] = F[d]; if (mpi->ip[dir] != mpi->iproc[dir] - 1) MPI_Irecv(recvbuf, Nsys * nvars, MPI_DOUBLE, mpi->ip[dir] + 1, 214, mpi->comm[dir], &req[0]); if (mpi->ip[dir]) MPI_Isend(sendbuf, Nsys * nvars, MPI_DOUBLE, mpi->ip[dir] - 1, 214, mpi->comm[dir], &req[1]); MPI_Waitall(2, &req[0], MPI_STATUS_IGNORE); if (mpi->ip[dir] != mpi->iproc[dir] - 1) for (d = 0; d < Nsys * nvars; d++) F[d + Nsys * nvars * dim[dir]] = recvbuf[d]; #endif /* save the solution to fI */ #pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI) for (sys = 0; sys < Nsys; sys++) { _ArrayIndexnD_(ndims, sys, bounds_outer, index_outer, 0); _ArrayCopy1D_(index_outer, indexI, ndims); for (indexI[dir] = 0; indexI[dir] < dim[dir] + 1; indexI[dir]++) { int p; _ArrayIndex1D_(ndims, bounds_inter, indexI, 0, p); _ArrayCopy1D_((F + sys * nvars + Nsys * nvars * indexI[dir]), (fI + nvars * p), nvars); } } return (0); }
main.c
#include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <time.h> #if defined(_OPENMP) #include <omp.h> #endif int main (int argc, char** argv) { // Initialization of variables int i, j, k, t, height = 360, width = 360, depth = 360, num_iterations = 1000, opt; float alpha = 0.1; #ifndef _OPENMP clock_t before, after; #else double before, after; #endif double time_used; // Parsing command-line options while ((opt = getopt(argc, argv, "h:w:d:t:a:")) != -1) { switch (opt) { case 'h': height = atoi(optarg); break; case 'w': width = atoi(optarg); break; case 'd': depth = atoi(optarg); break; case 't': num_iterations = atoi(optarg); break; case 'a': alpha = atof(optarg); break; default: fprintf(stderr, "Usage: %s [-h height] [-w width] [-d depth] [-t no. iterations] [-a alpha value for heat eq.]\n", argv[0]); exit(EXIT_FAILURE); } } // beta reduces the stencil operation to only require 6 additions (instead of 7) float beta = (1 - 6*alpha); // Allocate matrices float ***tmp; // temporary pointer to perform pointer swaps float ***a = (float***) malloc(height*sizeof(float**)); float ***b = (float***) malloc(height*sizeof(float**)); for (i = 0; i < height; ++i) { a[i] = (float**) malloc(width*sizeof(float*)); b[i] = (float**) malloc(width*sizeof(float*)); for (j = 0; j < width; ++j) { a[i][j] = (float*) malloc(depth*sizeof(float)); b[i][j] = (float*) malloc(depth*sizeof(float)); } } // Instantiate random values in matrices #pragma omp parallel for private(j,k) /* j and k must both be private to each thread, otherwise the inner loops race on them; rand() is also not guaranteed thread-safe */ for (i = 0; i < height; ++i) { for (j = 0; j < width; ++j) { for (k = 0; k < depth; ++k) { a[i][j][k] = (float) rand() / (float) (RAND_MAX); b[i][j][k] = a[i][j][k]; } } } // Start timer #ifndef _OPENMP before = clock(); #else before = omp_get_wtime(); #endif // Perform computations #pragma omp parallel private(t,i,j,k) { #ifdef _OPENMP #pragma omp single { printf("Using %d OpenMP threads to parallelize heat equation\n", omp_get_num_threads()); fflush(NULL); } #endif // Perform heat equation for (t = 0; t < num_iterations; ++t) { #pragma omp for for (i = 1; i < height - 1; ++i) for (j = 1; j < width - 1; ++j) for (k = 1; k < depth - 1; ++k) b[i][j][k] = beta*a[i][j][k] + alpha*( a[i+1][j][k] + a[i-1][j][k] + a[i][j+1][k] + a[i][j-1][k] + a[i][j][k+1] + a[i][j][k-1]); #pragma omp single { // pointer swap tmp = b; b = a; a = tmp; } } } // End timer and evaluate time used #ifndef _OPENMP after = clock(); time_used = (float) (after - before) / (float) CLOCKS_PER_SEC; #else after = omp_get_wtime(); time_used = after - before; #endif // deallocate matrices for (i = 0; i < height; ++i) { for (j = 0; j < width; ++j) { free(a[i][j]); free(b[i][j]); } free(a[i]); free(b[i]); } free(a); free(b); // Report parameters and results float base = 1e-9*(float)num_iterations/time_used; float gflops = base*(float)(height-2)*(float)(width-2)*(float)(depth-2)*8.0; float bandwidth = base*sizeof(float)*(float)height*(float)width*(float)depth*2.0; printf("3D Grid : %d x %d x %d\n", height, width, depth); printf("Iterations : %d\n", num_iterations); printf("alpha : %g\n", alpha); printf("Time : %f s\n", time_used); printf("Throughput : %f GFLOPS\n", gflops); printf("Minimal Bandwidth : %f GB/s\n", bandwidth); return EXIT_SUCCESS; }
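Beyond privatizing j and k, the initialization loop above still funnels every thread through rand()'s single hidden state. A hedged sketch of a reentrant alternative using POSIX rand_r() with one seed per thread; init_grid and the base seed 1234 are illustrative names and values, not part of the original program, and the a/b grids are assumed to be allocated as in main() above:

#include <stdlib.h>
#if defined(_OPENMP)
#include <omp.h>
#endif

/* Fill both grids with identical pseudo-random values, one RNG
 * stream per thread so no generator state is shared. */
void init_grid(float ***a, float ***b, int height, int width, int depth)
{
    int i, j, k;
#pragma omp parallel private(j, k)
    {
        unsigned int seed = 1234u;
#if defined(_OPENMP)
        seed += (unsigned int)omp_get_thread_num();  /* distinct stream per thread */
#endif
#pragma omp for
        for (i = 0; i < height; ++i)
            for (j = 0; j < width; ++j)
                for (k = 0; k < depth; ++k) {
                    a[i][j][k] = (float)rand_r(&seed) / (float)RAND_MAX;
                    b[i][j][k] = a[i][j][k];
                }
    }
}

Note that rand_r() is POSIX rather than ISO C, so strict-standards builds may need _POSIX_C_SOURCE defined.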
#include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <time.h> int main (int argc, char** argv) { // Initialization of variables int i, j, k, t, height = 360, width = 360, depth = 360, num_iterations = 1000, opt; float alpha = 0.1; #ifndef _OPENMP clock_t before, after; #else double before, after; #endif double time_used; // Parsing command-line options while ((opt = getopt(argc, argv, "h:w:d:t:a:")) != -1) { switch (opt) { case 'h': height = atoi(optarg); break; case 'w': width = atoi(optarg); break; case 'd': depth = atoi(optarg); break; case 't': num_iterations = atoi(optarg); break; case 'a': alpha = atof(optarg); break; default: fprintf(stderr, "Usage: %s [-h height] [-w width] [-d depth] [-t no. iterations] [-a alpha value for heat eq.]\n", argv[0]); exit(EXIT_FAILURE); } } // beta reduces the stencil operation to only require 6 flops (instead of 7) float beta = (1 - 6*alpha); // Allocate matrices float ***tmp; // temporary pointer to perform pointer swaps float ***a = (float***) malloc(height*sizeof(float**)); float ***b = (float***) malloc(height*sizeof(float**)); for (i = 0; i < height; ++i) { a[i] = (float**) malloc(width*sizeof(float*)); b[i] = (float**) malloc(width*sizeof(float*)); for (j = 0; j < width; ++j) { a[i][j] = (float*) malloc(depth*sizeof(float)); b[i][j] = (float*) malloc(depth*sizeof(float)); } } // Instantiate random values in matrices for (i = 0; i < height; ++i) { for (j = 0; j < width; ++j) { for (k = 0; k < depth; ++k) { a[i][j][k] = (float) rand() / (float) (RAND_MAX); b[i][j][k] = a[i][j][k]; } } } // Start timer #ifndef _OPENMP before = clock(); #else before = omp_get_wtime(); #endif // Perform computations // Perform heat equation for (t = 0; t < num_iterations; ++t) { for (i = 1; i < height - 1; ++i) for (j = 1; j < width - 1; ++j) for (k = 1; k < depth - 1; ++k) b[i][j][k] = beta*a[i][j][k] + alpha*( a[i+1][j][k] + a[i-1][j][k] + a[i][j+1][k] + a[i][j-1][k] + a[i][j][k+1] + a[i][j][k-1]); // pointer swap tmp = b; b = a; a = tmp; } // End timer and evaluate time used #ifndef _OPENMP after = clock(); time_used = (float) (after - before) / (float) CLOCKS_PER_SEC; #else after = omp_get_wtime(); time_used = after - before; #endif // deallocate matrices for (i = 0; i < height; ++i) { for (j = 0; j < width; ++j) { free(a[i][j]); free(b[i][j]); } free(a[i]); free(b[i]); } free(a); free(b); // Report parameters and results float base = 1e-9*(float)num_iterations/time_used; float gflops = base*(float)(height-2)*(float)(width-2)*(float)(depth-2)*8.0; float bandwidth = base*sizeof(float)*(float)height*(float)width*(float)depth*2.0; printf("3D Grid : %d x %d x %d\n", height, width, depth); printf("Iterations : %d\n", num_iterations); printf("alpha : %g\n", alpha); printf("Time : %f s\n", time_used); printf("Throughput : %f GFLOPS\n", gflops); printf("Minimal Bandwidth : %f GB/s\n", bandwidth); return EXIT_SUCCESS; }
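The float*** allocation used above scatters depth-sized rows across the heap, which costs the stencil sweep locality and makes cleanup a triple loop. A sketch of the common alternative, one contiguous block with manual row-major indexing; alloc_grid and IDX are illustrative helpers, not part of the original program:

#include <stdlib.h>

/* One contiguous allocation; a[IDX(i,j,k,w,d)] replaces a[i][j][k]. */
#define IDX(i, j, k, w, d) (((size_t)(i) * (w) + (j)) * (d) + (k))

static float *alloc_grid(int height, int width, int depth)
{
    return (float *)malloc((size_t)height * width * depth * sizeof(float));
}

int main(void)
{
    int h = 4, w = 4, d = 4;
    float *g = alloc_grid(h, w, d);
    if (g) { g[IDX(1, 2, 3, w, d)] = 1.0f; free(g); }
    return 0;
}

With this layout the pointer swap in the time loop still works (swap the two float* base pointers), and a single free() releases each grid.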
#include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <time.h> #if defined(_OPENMP) #include <omp.h> #endif int main (int argc, char** argv) { // Initialization of variables int i, j, k, t, height = 360, width = 360, depth = 360, num_iterations = 1000, opt; float alpha = 0.1; #ifndef _OPENMP clock_t before, after; #else double before, after; #endif double time_used; // Parsing command-line options while ((opt = getopt(argc, argv, "h:w:d:t:a:")) != -1) { switch (opt) { case 'h': height = atoi(optarg); break; case 'w': width = atoi(optarg); break; case 'd': depth = atoi(optarg); break; case 't': num_iterations = atoi(optarg); break; case 'a': alpha = atof(optarg); break; default: fprintf(stderr, "Usage: %s [-h height] [-w width] [-d depth] [-t no. iterations] [-a alpha value for heat eq.]\n", argv[0]); exit(EXIT_FAILURE); } } // beta reduces the stencil operation to only require 6 additions (instead of 7) float beta = (1 - 6*alpha); // Allocate matrices float ***tmp; // temporary pointer to perform pointer swaps float ***a = (float***) malloc(height*sizeof(float**)); float ***b = (float***) malloc(height*sizeof(float**)); for (i = 0; i < height; ++i) { a[i] = (float**) malloc(width*sizeof(float*)); b[i] = (float**) malloc(width*sizeof(float*)); for (j = 0; j < width; ++j) { a[i][j] = (float*) malloc(depth*sizeof(float)); b[i][j] = (float*) malloc(depth*sizeof(float)); } } // Instantiate random values in matrices #pragma omp parallel for private(j,k) /* j and k must both be private to each thread, otherwise the inner loops race on them; rand() is also not guaranteed thread-safe */ for (i = 0; i < height; ++i) { for (j = 0; j < width; ++j) { for (k = 0; k < depth; ++k) { a[i][j][k] = (float) rand() / (float) (RAND_MAX); b[i][j][k] = a[i][j][k]; } } } // Start timer #ifndef _OPENMP before = clock(); #else before = omp_get_wtime(); #endif // Perform computations #pragma omp parallel private(t,i,j,k) { #ifdef _OPENMP #pragma omp single { printf("Using %d OpenMP threads to parallelize heat equation\n", omp_get_num_threads()); fflush(NULL); } #endif // Perform heat equation for (t = 0; t < num_iterations; ++t) { #pragma omp for for (i = 1; i < height - 1; ++i) for (j = 1; j < width - 1; ++j) for (k = 1; k < depth - 1; ++k) b[i][j][k] = beta*a[i][j][k] + alpha*( a[i+1][j][k] + a[i-1][j][k] + a[i][j+1][k] + a[i][j-1][k] + a[i][j][k+1] + a[i][j][k-1]); #pragma omp single { // pointer swap tmp = b; b = a; a = tmp; } } } // End timer and evaluate time used #ifndef _OPENMP after = clock(); time_used = (float) (after - before) / (float) CLOCKS_PER_SEC; #else after = omp_get_wtime(); time_used = after - before; #endif // deallocate matrices for (i = 0; i < height; ++i) { for (j = 0; j < width; ++j) { free(a[i][j]); free(b[i][j]); } free(a[i]); free(b[i]); } free(a); free(b); // Report parameters and results float base = 1e-9*(float)num_iterations/time_used; float gflops = base*(float)(height-2)*(float)(width-2)*(float)(depth-2)*8.0; float bandwidth = base*sizeof(float)*(float)height*(float)width*(float)depth*2.0; printf("3D Grid : %d x %d x %d\n", height, width, depth); printf("Iterations : %d\n", num_iterations); printf("alpha : %g\n", alpha); printf("Time : %f s\n", time_used); printf("Throughput : %f GFLOPS\n", gflops); printf("Minimal Bandwidth : %f GB/s\n", bandwidth); return EXIT_SUCCESS; }
image_manipulation.h
#ifndef KUTILITY_IMAGE_MANIPULATION_H #define KUTILITY_IMAGE_MANIPULATION_H #include "kutility/kutility.def" #include "kutility/general.h" namespace kutility { template<typename T1, typename T2> void scale( T1* src, int h, int w, float sc, T2* dst, int dh, int dw ) { int nh = int( h*sc ); int nw = int( w*sc ); assert( dst != NULL ); assert( nh == dh ); assert( nw == dw ); if( sc == 1 ) { for( int i=0; i<h*w; i++ ) dst[i] = (T2)src[i]; return; } double scale_factor = 1.0 / sc; memset(dst, 0, sizeof(T2)*dh*dw ); float y,x; for( int ny=0; ny<nh; ny++ ) { y = ny * scale_factor; if( y>= h-1 ) continue; for( int nx=0; nx<nw; nx++ ) { x = nx * scale_factor; if( x>= w-1 ) continue; dst[ny*nw+nx] = (T2)bilinear_interpolation(src, w, x, y); } } } template<class T> inline void rgb_to_y(T* cim, int h, int w, T* gim ) { assert( (gim!=NULL) && (cim!=NULL) ); for( int y=0; y<h; y++ ) { for( int x=0; x<w; x++ ) { int index=y*w+x; float r=cim[3*index ]; float g=cim[3*index+1]; float b=cim[3*index+2]; gim[index] = T( 0.299*r + 0.587*g + 0.114*b ); } } } template<class T> inline void y_to_rgb(T* yim, int h, int w, T* rgbim ) { assert( rgbim != NULL ); int wh = w*h; for( int k=0; k<wh; k++ ) { rgbim[ 3*k ] = yim[k]; rgbim[ 3*k+1 ] = yim[k]; rgbim[ 3*k+2 ] = yim[k]; } } template<class T> inline void rgb_to_bgr(T* rgb, int h, int w, T* bgr ) { assert( bgr != NULL ); int wh3 = w*h*3; for( int k=0; k<wh3; k+=3 ) { T tmp = bgr[k]; rgb[ k ] = bgr[ k+2 ]; rgb[ k+1 ] = bgr[ k+1 ]; rgb[ k+2 ] = tmp; } } template<class T> inline void bgr_to_rgb(T* bgr, int h, int w, T* rgb ) { rgb_to_bgr(bgr,h,w,rgb); } template<class T> inline void rgba_to_y(T* cim, int h, int w, T* gim ) { assert( (gim!=NULL) && (cim!=NULL) ); for( int y=0; y<h; y++ ) { for( int x=0; x<w; x++ ) { int index=y*w+x; float r=cim[4*index ]; float g=cim[4*index+1]; float b=cim[4*index+2]; gim[index] = T( 0.299*r + 0.587*g + 0.114*b ); } } } template<class T> inline void rgba_to_rgb(T* rgbaim, int h, int w, T* rgbim ) { assert( (rgbim!=NULL) && (rgbaim!=NULL) ); int wh = w*h; for( int k=0; k<wh; k++ ) { rgbim[3*k ] = rgbaim[4*k ]; rgbim[3*k+1] = rgbaim[4*k+1]; rgbim[3*k+2] = rgbaim[4*k+2]; } } uchar* clean_image (uchar * &image, int w, int h, bool in_place=false); uchar* apply_erosion (uchar * &image, int w, int h, bool in_place=false); uchar* apply_dilation(uchar * &image, int w, int h, bool in_place=false); uchar* down_sample (uchar * image, int w, int h); uchar* resize_image( uchar* &image, int h, int w, int nh, int nw, bool in_place=false); /// scales the image intensity between a lower "il" and an upper /// "iu" value. "sz" is the image size. 
/// by default il=0 and iu=1; double* scale_intensity( uchar* image, int sz, double il=0, double iu=1); template<class T> void decompose_channels( T* image, int h, int w, T* &ch_0, T* &ch_1, T* &ch_2) { int image_size = h*w; ch_0 = kutility::allocate<T>(image_size); ch_1 = kutility::allocate<T>(image_size); ch_2 = kutility::allocate<T>(image_size); #if defined(WITH_OPENMP) #pragma omp parallel for #endif for( int y=0; y<h; y++ ) { int yw = y*w; for( int x=0; x<w; x++ ) { int index = yw+x; int cindex = 3*index; /* cindex steps through the interleaved 3-channel input */ ch_0[index] = image[cindex ]; ch_1[index] = image[cindex+1]; ch_2[index] = image[cindex+2]; } } } /// applies gamma correction template<class T> inline T* gamma_correction( T* im, int h, int w, double gamma, bool in_place=false) { int sz = w*h; T* out; if( !in_place ) out = kutility::allocate<T>(sz); else out = im; double val; for( int i=0; i<sz; i++ ) { val = (pow( (double)im[i], gamma )); if( val > 255 ) out[i] = (T)255; else out[i] = (T)val; } return out; } /// adds some noise to the pixels template<class T> inline T* add_noise( T* im, int h, int w, int noise_level, bool in_place=false) { int sz = w*h; T* out; if( !in_place ) out = kutility::allocate<T>(sz); else out = im; for( int i=0; i<sz; i++ ) { int sign = 1; if( rand()/(double)RAND_MAX < 0.5 ) sign = -1; out[i] = im[i] + sign * rand()/(double)RAND_MAX * noise_level; } return out; } } #endif
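scale() above delegates the actual resampling to bilinear_interpolation(src, w, x, y), which is declared elsewhere in kutility; the sketch below is an assumed reconstruction of the standard formula consistent with that call, shown for float input, not the library's actual definition. scale()'s guards x < w-1 and y < h-1 keep the four taps in bounds:

#include <stdio.h>

/* Weights fx, fy split the unit cell spanned by the four
 * neighbouring pixels p[0], p[1], p[w], p[w+1]. */
static float bilinear_interp_sketch(const float *src, int w, float x, float y)
{
    int x0 = (int)x, y0 = (int)y;
    float fx = x - (float)x0, fy = y - (float)y0;
    const float *p = src + y0 * w + x0;
    return (1.0f - fy) * ((1.0f - fx) * p[0] + fx * p[1])
         +         fy  * ((1.0f - fx) * p[w] + fx * p[w + 1]);
}

int main(void)
{
    float img[4] = {0.0f, 1.0f, 2.0f, 3.0f};  /* 2x2 image, w = 2 */
    printf("%g\n", bilinear_interp_sketch(img, 2, 0.5f, 0.5f));  /* prints 1.5 */
    return 0;
}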
#ifndef KUTILITY_IMAGE_MANIPULATION_H #define KUTILITY_IMAGE_MANIPULATION_H #include "kutility/kutility.def" #include "kutility/general.h" namespace kutility { template < typename T1, typename T2 > void scale(T1 * src, int h, int w, float sc, T2 * dst, int dh, int dw) { int nh = int (h * sc); int nw = int (w * sc); assert(dst != NULL); assert(nh == dh); assert(nw == dw); if (sc == 1) { for (int i = 0; i < h * w; i++) dst[i] = (T2) src[i]; return; } double scale_factor = 1.0 / sc; memset(dst, 0, sizeof(T2) * dh * dw); float y, x; for (int ny = 0; ny < nh; ny++) { y = ny * scale_factor; if (y >= h - 1) continue; for (int nx = 0; nx < nw; nx++) { x = nx * scale_factor; if (x >= w - 1) continue; dst[ny * nw + nx] = (T2) bilinear_interpolation(src, w, x, y); } } } template < class T > inline void rgb_to_y(T * cim, int h, int w, T * gim) { assert((gim != NULL) && (cim != NULL)); for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { int index = y * w + x; float r = cim[3 * index]; float g = cim[3 * index + 1]; float b = cim[3 * index + 2]; gim[index] = T(0.299 * r + 0.587 * g + 0.114 * b); } } } template < class T > inline void y_to_rgb(T * yim, int h, int w, T * rgbim) { assert(rgbim != NULL); int wh = w * h; for (int k = 0; k < wh; k++) { rgbim[3 * k] = yim[k]; rgbim[3 * k + 1] = yim[k]; rgbim[3 * k + 2] = yim[k]; } } template < class T > inline void rgb_to_bgr(T * rgb, int h, int w, T * bgr) { assert(bgr != NULL); int wh3 = w * h * 3; for (int k = 0; k < wh3; k += 3) { T tmp = bgr[k]; rgb[k] = bgr[k + 2]; rgb[k + 1] = bgr[k + 1]; rgb[k + 2] = tmp; } } template < class T > inline void bgr_to_rgb(T * bgr, int h, int w, T * rgb) { rgb_to_bgr(bgr, h, w, rgb); } template < class T > inline void rgba_to_y(T * cim, int h, int w, T * gim) { assert((gim != NULL) && (cim != NULL)); for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { int index = y * w + x; float r = cim[4 * index]; float g = cim[4 * index + 1]; float b = cim[4 * index + 2]; gim[index] = T(0.299 * r + 0.587 * g + 0.114 * b); } } } template < class T > inline void rgba_to_rgb(T * rgbaim, int h, int w, T * rgbim) { assert((rgbim != NULL) && (rgbaim != NULL)); int wh = w * h; for (int k = 0; k < wh; k++) { rgbim[3 * k] = rgbaim[4 * k]; rgbim[3 * k + 1] = rgbaim[4 * k + 1]; rgbim[3 * k + 2] = rgbaim[4 * k + 2]; } } uchar *clean_image(uchar * &image, int w, int h, bool in_place = false); uchar *apply_erosion(uchar * &image, int w, int h, bool in_place = false); uchar *apply_dilation(uchar * &image, int w, int h, bool in_place = false); uchar *down_sample(uchar * image, int w, int h); uchar *resize_image(uchar * &image, int h, int w, int nh, int nw, bool in_place = false); ///scales the image intensity between a lower "il" and an upper /// "iu" value."sz" is the image size. 
/// by default il = 0 and iu = 1; double *scale_intensity(uchar * image, int sz, double il = 0, double iu = 1); template < class T > void decompose_channels(T * image, int h, int w, T * &ch_0, T * &ch_1, T * &ch_2) { int image_size = h * w; ch_0 = kutility::allocate < T > (image_size); ch_1 = kutility::allocate < T > (image_size); ch_2 = kutility::allocate < T > (image_size); for (int y = 0; y < h; y++) { int yw = y * w; for (int x = 0; x < w; x++) { int index = yw + x; int cindex = 3 * index; ch_0[index] = image[cindex]; ch_1[index] = image[cindex + 1]; ch_2[index] = image[cindex + 2]; } } } ///applies gamma correction template < class T > inline T * gamma_correction(T * im, int h, int w, double gamma, bool in_place = false) { int sz = w * h; T *out; if (!in_place) out = kutility::allocate < T > (sz); else out = im; double val; for (int i = 0; i < sz; i++) { val = (pow((double)im[i], gamma)); if (val > 255) out[i] = (T) 255; else out[i] = (T) val; } return out; } ///adds some noise to the pixels template < class T > inline T * add_noise(T * im, int h, int w, int noise_level, bool in_place = false) { int sz = w * h; T *out; if (!in_place) out = kutility::allocate < T > (sz); else out = im; for (int i = 0; i < sz; i++) { int sign = 1; if (rand() / (double)RAND_MAX < 0.5) sign = -1; out[i] = im[i] + sign * rand() / (double)RAND_MAX * noise_level; } return out; } } #endif
#ifndef KUTILITY_IMAGE_MANIPULATION_H #define KUTILITY_IMAGE_MANIPULATION_H #include "kutility/kutility.def" #include "kutility/general.h" namespace kutility { template < typename T1, typename T2 > void scale(T1 * src, int h, int w, float sc, T2 * dst, int dh, int dw) { int nh = int (h * sc); int nw = int (w * sc); assert(dst != NULL); assert(nh == dh); assert(nw == dw); if (sc == 1) { for (int i = 0; i < h * w; i++) dst[i] = (T2) src[i]; return; } double scale_factor = 1.0 / sc; memset(dst, 0, sizeof(T2) * dh * dw); float y, x; for (int ny = 0; ny < nh; ny++) { y = ny * scale_factor; if (y >= h - 1) continue; for (int nx = 0; nx < nw; nx++) { x = nx * scale_factor; if (x >= w - 1) continue; dst[ny * nw + nx] = (T2) bilinear_interpolation(src, w, x, y); } } } template < class T > inline void rgb_to_y(T * cim, int h, int w, T * gim) { assert((gim != NULL) && (cim != NULL)); for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { int index = y * w + x; float r = cim[3 * index]; float g = cim[3 * index + 1]; float b = cim[3 * index + 2]; gim[index] = T(0.299 * r + 0.587 * g + 0.114 * b); } } } template < class T > inline void y_to_rgb(T * yim, int h, int w, T * rgbim) { assert(rgbim != NULL); int wh = w * h; for (int k = 0; k < wh; k++) { rgbim[3 * k] = yim[k]; rgbim[3 * k + 1] = yim[k]; rgbim[3 * k + 2] = yim[k]; } } template < class T > inline void rgb_to_bgr(T * rgb, int h, int w, T * bgr) { assert(bgr != NULL); int wh3 = w * h * 3; for (int k = 0; k < wh3; k += 3) { T tmp = bgr[k]; rgb[k] = bgr[k + 2]; rgb[k + 1] = bgr[k + 1]; rgb[k + 2] = tmp; } } template < class T > inline void bgr_to_rgb(T * bgr, int h, int w, T * rgb) { rgb_to_bgr(bgr, h, w, rgb); } template < class T > inline void rgba_to_y(T * cim, int h, int w, T * gim) { assert((gim != NULL) && (cim != NULL)); for (int y = 0; y < h; y++) { for (int x = 0; x < w; x++) { int index = y * w + x; float r = cim[4 * index]; float g = cim[4 * index + 1]; float b = cim[4 * index + 2]; gim[index] = T(0.299 * r + 0.587 * g + 0.114 * b); } } } template < class T > inline void rgba_to_rgb(T * rgbaim, int h, int w, T * rgbim) { assert((rgbim != NULL) && (rgbaim != NULL)); int wh = w * h; for (int k = 0; k < wh; k++) { rgbim[3 * k] = rgbaim[4 * k]; rgbim[3 * k + 1] = rgbaim[4 * k + 1]; rgbim[3 * k + 2] = rgbaim[4 * k + 2]; } } uchar *clean_image(uchar * &image, int w, int h, bool in_place = false); uchar *apply_erosion(uchar * &image, int w, int h, bool in_place = false); uchar *apply_dilation(uchar * &image, int w, int h, bool in_place = false); uchar *down_sample(uchar * image, int w, int h); uchar *resize_image(uchar * &image, int h, int w, int nh, int nw, bool in_place = false); ///scales the image intensity between a lower "il" and an upper /// "iu" value."sz" is the image size. 
/// by default il = 0 and iu = 1; double *scale_intensity(uchar * image, int sz, double il = 0, double iu = 1); template < class T > void decompose_channels(T * image, int h, int w, T * &ch_0, T * &ch_1, T * &ch_2) { int image_size = h * w; ch_0 = kutility::allocate < T > (image_size); ch_1 = kutility::allocate < T > (image_size); ch_2 = kutility::allocate < T > (image_size); #if defined(WITH_OPENMP) #pragma omp parallel for #endif for (int y = 0; y < h; y++) { int yw = y * w; for (int x = 0; x < w; x++) { int index = yw + x; int cindex = 3 * index; ch_0[index] = image[cindex]; ch_1[index] = image[cindex + 1]; ch_2[index] = image[cindex + 2]; } } } ///applies gamma correction template < class T > inline T * gamma_correction(T * im, int h, int w, double gamma, bool in_place = false) { int sz = w * h; T *out; if (!in_place) out = kutility::allocate < T > (sz); else out = im; double val; for (int i = 0; i < sz; i++) { val = (pow((double)im[i], gamma)); if (val > 255) out[i] = (T) 255; else out[i] = (T) val; } return out; } ///adds some noise to the pixels template < class T > inline T * add_noise(T * im, int h, int w, int noise_level, bool in_place = false) { int sz = w * h; T *out; if (!in_place) out = kutility::allocate < T > (sz); else out = im; for (int i = 0; i < sz; i++) { int sign = 1; if (rand() / (double)RAND_MAX < 0.5) sign = -1; out[i] = im[i] + sign * rand() / (double)RAND_MAX * noise_level; } return out; } } #endif
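The two formatted variants of this row differ only in the `#if defined(WITH_OPENMP)` guard around the row loop of decompose_channels, which lets the same source build with or without an OpenMP compiler. A minimal standalone sketch of that guard pattern follows; the demo main and buffer sizes are illustrative and not part of the library:

// Minimal sketch of the guarded-pragma pattern used in decompose_channels.
// Build without OpenMP:  g++ demo.cpp
// Build with OpenMP:     g++ -fopenmp -DWITH_OPENMP demo.cpp
#include <cstdio>
#include <vector>

void decompose(const unsigned char* image, int h, int w,
               unsigned char* ch_0, unsigned char* ch_1, unsigned char* ch_2)
{
#if defined(WITH_OPENMP)
#pragma omp parallel for
#endif
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            int index  = y * w + x;  // pixel index within one plane
            int cindex = 3 * index;  // offset into the interleaved RGB buffer
            ch_0[index] = image[cindex];      // each (y,x) writes a distinct
            ch_1[index] = image[cindex + 1];  // index, so the parallel rows
            ch_2[index] = image[cindex + 2];  // do not race
        }
    }
}

int main()
{
    const int h = 2, w = 3;
    std::vector<unsigned char> img(3 * h * w);
    for (size_t i = 0; i < img.size(); i++) img[i] = (unsigned char) i;
    std::vector<unsigned char> r(h * w), g(h * w), b(h * w);
    decompose(img.data(), h, w, r.data(), g.data(), b.data());
    std::printf("r[0]=%d g[0]=%d b[0]=%d\n", r[0], g[0], b[0]); // 0 1 2
    return 0;
}

Guarding the pragma (rather than relying on compilers ignoring unknown pragmas) keeps builds warning-free when OpenMP is disabled, which is why the no_omp variant of the row simply drops the three guarded lines.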
GB_binop__bget_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bget_int32 // A.*B function (eWiseMult): GB_AemultB__bget_int32 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__bget_int32 // C+=b function (dense accum): GB_Cdense_accumb__bget_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int32 // C=scalar+B GB_bind1st__bget_int32 // C=scalar+B' GB_bind1st_tran__bget_int32 // C=A+scalar GB_bind2nd__bget_int32 // C=A'+scalar GB_bind2nd_tran__bget_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITGET (x, y, int32_t, 32) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bget_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bget_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \ } GrB_Info 
GB_bind1st_tran__bget_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \ } GrB_Info GB_bind2nd_tran__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bget_int32 // A.*B function (eWiseMult): GB_AemultB__bget_int32 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__bget_int32 // C+=b function (dense accum): GB_Cdense_accumb__bget_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int32 // C=scalar+B GB_bind1st__bget_int32 // C=scalar+B' GB_bind1st_tran__bget_int32 // C=A+scalar GB_bind2nd__bget_int32 // C=A'+scalar GB_bind2nd_tran__bget_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITGET (x, y, int32_t, 32) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bget_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bget_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \ } GrB_Info GB_bind1st_tran__bget_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t 
*GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \ } GrB_Info GB_bind2nd_tran__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bget_int32 // A.*B function (eWiseMult): GB_AemultB__bget_int32 // A*D function (colscale): (none) // D*A function (rowscale): (none) // C+=B function (dense accum): GB_Cdense_accumB__bget_int32 // C+=b function (dense accum): GB_Cdense_accumb__bget_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int32 // C=scalar+B GB_bind1st__bget_int32 // C=scalar+B' GB_bind1st_tran__bget_int32 // C=A+scalar GB_bind2nd__bget_int32 // C=A'+scalar GB_bind2nd_tran__bget_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = GB_BITGET (aij, bij, int32_t, 32) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITGET (x, y, int32_t, 32) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT32 || GxB_NO_BGET_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bget_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bget_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bget_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bget_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = GB_BITGET (x, bij, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bget_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = GB_BITGET (aij, y, int32_t, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (x, aij, int32_t, 32) ; \ } GrB_Info 
GB_bind1st_tran__bget_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = GB_BITGET (aij, y, int32_t, 32) ; \ } GrB_Info GB_bind2nd_tran__bget_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
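The GB_bind1st and GB_bind2nd kernels in this row apply the binary operator with one operand bound to a scalar, skip entries absent from the bitmap (Bb or Ab), and parallelize with a static OpenMP schedule; the no_omp variant is identical except the two pragmas are removed. Below is a self-contained sketch of the bind2nd shape of that loop; BITGET32 is a simplified stand-in for GraphBLAS's GB_BITGET macro, and the demo values in main are illustrative only:

// Sketch of the bind2nd pattern: Cx [p] = bitget (Ax [p], y) for every entry
// present in the bitmap, with the same OpenMP loop shape as the generated
// kernels above.  Compile: cc -fopenmp bind2nd_demo.c
#include <stdint.h>
#include <stdio.h>

// Simplified stand-in for GB_BITGET (aij, y, int32_t, 32): extract bit y.
#define BITGET32(x,y) (((x) >> ((y) % 32)) & 1)

static void bind2nd_bitget
(
    int32_t *Cx,            // output, same length as Ax
    const int32_t *Ax,      // input values
    const int8_t *Ab,       // bitmap; NULL means all entries present
    int64_t anz,            // number of entries
    int32_t y,              // bound scalar: the bit position to extract
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // skip holes in the bitmap
        Cx [p] = BITGET32 (Ax [p], y) ;
    }
}

int main (void)
{
    int32_t Ax [4] = { 0xA, 0xA, 0x1, 0x1 } ;   // bits 1010, 1010, 0001, 0001
    int8_t  Ab [4] = { 1, 1, 1, 0 } ;           // last entry is absent
    int32_t Cx [4] = { -1, -1, -1, -1 } ;
    bind2nd_bitget (Cx, Ax, Ab, 4, 1, 2) ;      // y = 1: extract bit 1
    printf ("%d %d %d %d\n", (int) Cx [0], (int) Cx [1],
            (int) Cx [2], (int) Cx [3]) ;       // prints: 1 1 0 -1
    return (0) ;
}

A static schedule fits here because each iteration does constant work, so equal-sized chunks balance the threads without the overhead of dynamic scheduling; skipped bitmap holes leave the corresponding Cx entries untouched, as in the generated code.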
parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd foo void test_no_clause() { int i; #pragma omp parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}} #pragma omp parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected 
')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // 
expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified 
in 'collapse' clause}} #pragma omp parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel #pragma omp parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 
'x'}} #pragma omp parallel for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp parallel for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of 
undeclared identifier 'y'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 
{{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 
{{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp parallel for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp parallel for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp parallel for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp parallel for simd nontemporal(x, y : 0) for (i = 0; i 
< 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} for (int i = 0; i < 10; ++i) ; }
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive ' // expected-error@+1 {{unexpected OpenMP directive ' void test_no_clause() { int i; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after ' ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of ' for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of ' for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of ' for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of ' for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of ' for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of ' // expected-error@+1 {{expected '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; } void 
test_simdlen() { int i; // expected-error@+1 {{expected '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of ' // expected-error@+1 {{expected '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; // expected-error@+1 {{expected '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of ' // expected-error@+1 {{expected '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this 
'('}} expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' // expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after ' // expected-error@+1 {{expression is not an integer constant expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} for (i = 0; i < 16; ++i) ; // expected-note@+2 
{{defined as private}} // expected-error@+1 {{private variable cannot be linear}} for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} for (i = 0; i < 16; ++i) ; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // 
expected-error@+1 {{expected variable name}} for (i = 0; i < 16; ++i) ; int x, y, z; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} for (i = 0; i < 16; ++i) ; int x, y, z; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} for (i = 0; i < 16; ++i) ; int x, y, z; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; 
++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive ' for (i = 0; i < 16; ++i) ; for (int i = 0; i < 10; ++i) ; for (int i = 0; i < 10; ++i) ; for (int i = 0; i < 10; ++i) ; for (int i = 0; i < 10; ++i) ; for (int i = 0; i < 10; ++i) ; }
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd foo void test_no_clause() { int i; #pragma omp parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}} #pragma omp parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected 
')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // 
expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified 
in 'collapse' clause}} #pragma omp parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel #pragma omp parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 
'x'}} #pragma omp parallel for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp parallel for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of 
undeclared identifier 'y'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 
{{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 
{{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected expression}} #pragma omp parallel for simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{expected variable name}} #pragma omp parallel for simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp parallel for simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp parallel for simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp parallel for simd nontemporal(x, y : 0) for (i = 0; i 
< 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} for (int i = 0; i < 10; ++i) ; #pragma omp parallel for simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp parallel for simd'}} for (int i = 0; i < 10; ++i) ; }
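The test above only exercises malformed spellings of the OpenMP 5.0 'nontemporal' and 'order' clauses; every diagnostic in it is intentional. For contrast, a minimal well-formed use of both clauses might look like the following sketch (assuming an OpenMP 5.0 compiler, e.g. clang -fopenmp -fopenmp-version=50; the arrays a, b and the size N are illustrative names, not part of the test):

/* Hedged sketch: a conforming OpenMP 5.0 counterpart to the malformed
 * directives rejected above. */
#define N 1024
static float a[N], b[N];

void scale(float alpha) {
  /* nontemporal(a, b) hints that accesses to a and b have low temporal
   * locality (streaming loads/stores may pay off); order(concurrent)
   * asserts the iterations may execute in any order. */
#pragma omp parallel for simd nontemporal(a, b) order(concurrent)
  for (int i = 0; i < N; ++i)
    b[i] = alpha * a[i];
}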
psamax.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pdzamax.c, normal z -> s, Fri Sep 28 17:38:10 2018 * **/ #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) (float*)plasma_tile_addr(A, m, n) /******************************************************************************/ void plasma_psamax(plasma_enum_t colrow, plasma_desc_t A, float *work, float *values, plasma_sequence_t *sequence, plasma_request_t *request) { // Return if failed sequence. if (sequence->status != PlasmaSuccess) return; switch (colrow) { //=================== // PlasmaColumnwise //=================== case PlasmaColumnwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_samax(PlasmaColumnwise, mvam, nvan, A(m, n), ldam, &work[A.n*m+n*A.nb], sequence, request); } } #pragma omp taskwait plasma_core_omp_samax(PlasmaRowwise, A.n, A.mt, work, A.n, values, sequence, request); break; //================ // PlasmaRowwise //================ case PlasmaRowwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_samax(PlasmaRowwise, mvam, nvan, A(m, n), ldam, &work[A.m*n+m*A.mb], sequence, request); } } #pragma omp taskwait plasma_core_omp_samax(PlasmaRowwise, A.m, A.nt, work, A.m, values, sequence, request); } }
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pdzamax.c, normal z -> s, Fri Sep 28 17:38:10 2018 * **/ #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) (float*)plasma_tile_addr(A, m, n) /******************************************************************************/ void plasma_psamax(plasma_enum_t colrow, plasma_desc_t A, float *work, float *values, plasma_sequence_t * sequence, plasma_request_t * request) { //Return if failed sequence. if (sequence->status != PlasmaSuccess) return; switch (colrow) { //=================== //PlasmaColumnwise //=================== case PlasmaColumnwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_samax(PlasmaColumnwise, mvam, nvan, A(m, n), ldam, &work[A.n * m + n * A.nb], sequence, request); } } plasma_core_omp_samax(PlasmaRowwise, A.n, A.mt, work, A.n, values, sequence, request); break; //================ //PlasmaRowwise //================ case PlasmaRowwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_samax(PlasmaRowwise, mvam, nvan, A(m, n), ldam, &work[A.m * n + m * A.mb], sequence, request); } } plasma_core_omp_samax(PlasmaRowwise, A.m, A.nt, work, A.m, values, sequence, request); } }
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pdzamax.c, normal z -> s, Fri Sep 28 17:38:10 2018 * **/ #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" #include <plasma_core_blas.h> #define A(m, n) (float*)plasma_tile_addr(A, m, n) /******************************************************************************/ void plasma_psamax(plasma_enum_t colrow, plasma_desc_t A, float *work, float *values, plasma_sequence_t *sequence, plasma_request_t *request) { // Return if failed sequence. if (sequence->status != PlasmaSuccess) return; switch (colrow) { //=================== // PlasmaColumnwise //=================== case PlasmaColumnwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_samax(PlasmaColumnwise, mvam, nvan, A(m, n), ldam, &work[A.n*m+n*A.nb], sequence, request); } } #pragma omp taskwait plasma_core_omp_samax(PlasmaRowwise, A.n, A.mt, work, A.n, values, sequence, request); break; //================ // PlasmaRowwise //================ case PlasmaRowwise: for (int m = 0; m < A.mt; m++) { int mvam = plasma_tile_mview(A, m); int ldam = plasma_tile_mmain(A, m); for (int n = 0; n < A.nt; n++) { int nvan = plasma_tile_nview(A, n); plasma_core_omp_samax(PlasmaRowwise, mvam, nvan, A(m, n), ldam, &work[A.m*n+m*A.mb], sequence, request); } } #pragma omp taskwait plasma_core_omp_samax(PlasmaRowwise, A.m, A.nt, work, A.m, values, sequence, request); } }
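All three psamax.c variants above implement the same two-stage reduction: stage one writes one partial max-absolute-value per (tile, column) into work, and after the taskwait a second samax call folds those partials into values. A minimal sketch of that pattern over a plain column-major array, without PLASMA's tile descriptors or task runtime (the name colmax_amax, its work layout, and the use of a simple parallel for are hypothetical, not PLASMA API):

#include <math.h>
#include <stddef.h>

/* Sketch: column-wise amax of an m-by-n column-major matrix A (leading
 * dimension lda), splitting rows into tiles of height mb. Stage 1 fills
 * work[tiles*n] with per-tile partial maxima; stage 2 reduces them. */
void colmax_amax(int m, int n, int mb, const float *A, int lda,
                 float *work, float *values) {
  int tiles = (m + mb - 1) / mb;
#pragma omp parallel for collapse(2)
  for (int t = 0; t < tiles; ++t)      /* stage 1: per-tile partials */
    for (int j = 0; j < n; ++j) {
      int i0 = t * mb;
      int i1 = i0 + mb < m ? i0 + mb : m;
      float amax = 0.0f;
      for (int i = i0; i < i1; ++i) {
        float v = fabsf(A[(size_t)j * lda + i]);
        if (v > amax) amax = v;
      }
      work[(size_t)t * n + j] = amax;
    }
  for (int j = 0; j < n; ++j) {        /* stage 2: fold the partials */
    float amax = 0.0f;
    for (int t = 0; t < tiles; ++t)
      if (work[(size_t)t * n + j] > amax) amax = work[(size_t)t * n + j];
    values[j] = amax;
  }
}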
THTensorConv.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorConv.c" #else /* 2D Input, 2D kernel : convolve given image with the given kernel. */ void THTensor_(validXCorr2Dptr)(real *r_, real alpha, real *t_, int64_t ir, int64_t ic, real *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { /* regular convolution */ for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two dimensions... (between input image and the mask) */ real *pi_ = t_ + yy*sr*ic + xx*sc; real *pw_ = k_; real sum = 0; for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } /* Update output */ *r_++ += alpha*sum; } } } else { /* SSE-based convolution */ for(yy = 0; yy < or; yy++) { real *pi_ = t_ + yy*sr*ic; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THVector_(cadd)(r_, r_, pis_, alpha*pw_[kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } r_ += oc; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel. */ void THTensor_(validConv2Dptr)(real *r_, real alpha, real *t_, int64_t ir, int64_t ic, real *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { /* regular convolution */ for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two dimensions... (between input image and the mask) */ real *pi_ = t_ + yy*sr*ic + xx*sc; real *pw_ = k_ + kr*kc - 1; real sum = 0; for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[-kx]; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } /* Update output */ *r_++ += alpha*sum; } } } else { /* SSE-based convolution */ for(yy = 0; yy < or; yy++) { real *pw_ = k_ + kr*kc - 1; real *pi_ = t_ + yy*sr*ic; for (ky = 0; ky < kr; ky++) { real *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THVector_(cadd)(r_, r_, pis_, alpha*pw_[-kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } r_ += oc; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel, full convolution. */ void THTensor_(fullConv2Dptr)(real *r_, real alpha, real *t_, int64_t ir, int64_t ic, real *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t oc = (ic - 1) * sc + kc; int64_t xx, yy, kx, ky; if ((sc != 1) || (ic < 4)) { /* regular convolution */ for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in two dimensions... (between input image and the mask) */ real *po_ = r_ + yy*sr*oc + xx*sc; real *pw_ = k_; for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[kx]; } po_ += oc; /* next input line */ pw_ += kc; /* next mask line */ } t_++; } } } else { /* SSE-based convolution */ for(yy = 0; yy < ir; yy++) { real *po_ = r_ + yy*sr*oc; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real *pos_ = po_; for (kx = 0; kx < kc; kx++) { THVector_(cadd)(pos_, pos_, t_, alpha*pw_[kx], ic); pos_++; } po_ += oc; /* next input line */ pw_ += kc; /* next mask line */ } t_ += ic; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel, full convolution. 
*/ void THTensor_(fullXCorr2Dptr)(real *r_, real alpha, real *t_, int64_t ir, int64_t ic, real *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t oc = (ic - 1) * sc + kc; int64_t xx, yy, kx, ky; if ((sc != 1) || (ic < 4)) { /* regular convolution */ for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in two dimensions... (between input image and the mask) */ real *po_ = r_ + yy*sr*oc + xx*sc; real *pw_ = k_ + kr*kc -1; int64_t kx, ky; for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[-kx]; } po_ += oc; /* next input line */ pw_ -= kc; /* next mask line */ } t_++; } } } else { /* SSE-based convolution */ for(yy = 0; yy < ir; yy++) { real *po_ = r_ + yy*sr*oc; real *pw_ = k_ + kr*kc -1; for (ky = 0; ky < kr; ky++) { real *pos_ = po_; for (kx = 0; kx < kc; kx++) { THVector_(cadd)(pos_, pos_, t_, pw_[-kx]*alpha, ic); pos_++; } po_ += oc; /* next input line */ pw_ -= kc; /* next mask line */ } t_ += ic; } } } /* 2D Input, 2D kernel : convolve given image with the given kernel, valid convolution. for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(validXCorr2DRevptr)(real *r_, real alpha, real *t_, int64_t ir, int64_t ic, real *k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t or = ir - (kr - 1) * sr; int64_t oc = ic - (kc - 1) * sc; int64_t xx, yy, kx, ky; if ((sc != 1) || (kc < 4)) { /* regular convolution */ for(yy = 0; yy < kr; yy++) { for(xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + yy*sr*ic + xx*sc; real z = *k_++ * alpha; for(ky = 0; ky < or; ky++) { for(kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } } } } else { /* SSE-based convolution */ for(yy = 0; yy < kr; yy++) { for(xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + yy*sr*ic + xx*sc; real z = *k_++ * alpha; for(ky = 0; ky < or; ky++) { THVector_(cadd)(po_, po_, pi_, z, oc); pi_ += ic; po_ += oc; } } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel. */ void THTensor_(validXCorr3Dptr)(real *r_, real alpha, real *t_, int64_t it, int64_t ir, int64_t ic, real *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = (it - kt) / st + 1; int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t zz, xx, yy; for (zz = 0; zz < ot; zz++) { for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two dimensions... (between input image and the mask) */ real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; real *pw_ = k_; real sum = 0; int64_t kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } pi_ += (ir-kr)*ic; /* next input slice */ } /* Update output */ *r_++ += sum*alpha; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel. */ void THTensor_(validConv3Dptr)(real *r_, real alpha, real *t_, int64_t it, int64_t ir, int64_t ic, real *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = (it - kt) / st + 1; int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t zz, xx, yy; for(zz = 0; zz < ot; zz++) { for(yy = 0; yy < or; yy++) { for(xx = 0; xx < oc; xx++) { /* Dot product in two dimensions... 
(between input image and the mask) */ real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; real *pw_ = k_ + kt*kr*kc - 1; real sum = 0; int64_t kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { for(kx = 0; kx < kc; kx++) { sum += pi_[kx]*pw_[-kx]; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } pi_ += (ir-kr)*ic; /* next input slice */ } /* Update output */ *r_++ += alpha*sum; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution. */ void THTensor_(fullConv3Dptr)(real *r_, real alpha, real *t_, int64_t it, int64_t ir, int64_t ic, real *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t or = (ir - 1) * sr + kr; int64_t oc = (ic - 1) * sc + kc; int64_t zz, xx, yy; for(zz = 0; zz < it; zz++) { for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in two dimensions... (between input image and the mask) */ real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc; real *pw_ = k_; int64_t kz, kx, ky; /* printf("Output Plane : %ld,%ld,%ld, input val=%g\n",zz,yy,xx,*t_); */ for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { /* printf("o=%g,k=%g," , po_[kx],pw_[kx]); */ po_[kx] += z * pw_[kx]; /* printf("o=%g " , po_[kx]); */ } /* printf("\n"); */ po_ += oc; /* next input line */ pw_ += kc; /* next mask line */ } po_ += (or-kr)*oc; /* next output slice */ /* printf("\n"); */ } t_++; } } } } /* 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution. */ void THTensor_(fullXCorr3Dptr)(real *r_, real alpha, real *t_, int64_t it, int64_t ir, int64_t ic, real *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t or = (ir - 1) * sr + kr; int64_t oc = (ic - 1) * sc + kc; int64_t zz, xx, yy; for(zz = 0; zz < it; zz++) { for(yy = 0; yy < ir; yy++) { for(xx = 0; xx < ic; xx++) { /* Outer product in two dimensions... (between input image and the mask) */ real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc; real *pw_ = k_ + kt*kr*kc -1; int64_t kz, kx, ky; for(kz = 0; kz < kt; kz++) { for(ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for(kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[-kx]; } po_ += oc; /* next input line */ pw_ -= kc; /* next mask line */ } po_ += (or-kr)*oc; /* next output slice */ } t_++; } } } } /* 3D Input, 3D kernel : convolve given image with the given kernel, valid convolution. 
for sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(validXCorr3DRevptr)(real *r_, real alpha, real *t_, int64_t it, int64_t ir, int64_t ic, real *k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = it - (kt - 1) * st; int64_t or = ir - (kr - 1) * sr; int64_t oc = ic - (kc - 1) * sc; int64_t zz, xx, yy; for(zz = 0; zz < kt; zz++) { for(yy = 0; yy < kr; yy++) { for(xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; real z = *k_++ * alpha; int64_t kz, kx, ky; for(kz = 0; kz < ot; kz++) { for(ky = 0; ky < or; ky++) { for(kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } pi_ += (ir-or)*ic; /* next input slice */ } } } } } void THTensor_(conv2d)(real* output_data, real alpha, real* ptr_input, int64_t nInputRows, int64_t nInputCols, real* ptr_weight, int64_t nKernelRows, int64_t nKernelCols, int64_t srow, int64_t scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } void THTensor_(conv3d)(real* output_data, real alpha, real* ptr_input, int64_t nInputDepth, int64_t nInputRows, int64_t nInputCols, real* ptr_weight, int64_t nKernelDepth, int64_t nKernelRows, int64_t nKernelCols, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(fullConv3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else if (*xc == 'X') THTensor_(validXCorr3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(validConv3Dptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); } int64_t THTensor_(convsize)(int64_t x, int64_t k, int64_t s, const char* vf) { THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'"); if (*vf == 'V') return (x-k)/s + 1; else return (x-1)*s + k; } /* 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor 
*t_, THTensor *k_, int64_t srow, int64_t scol) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for(k = 0; k < nKernelPlane; k++) { int64_t i; /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol) { int64_t nbatch, nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputRows, nOutputCols; int64_t istride0, kstride0, istride1, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be
a positive integer"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; istride1 = input->stride[1]; nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelPlane = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel"); THArgCheck(kernel->size[0] == input->size[0] , 2, "conv2DRevger : Input batch and kernel batch is not same size"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for(k = 0; k < nKernelPlane; k++) { int64_t i; for(i = 0; i < nInputPlane; i++) { int64_t p; for(p = 0; p < nbatch; p++) { /* get kernel */ real *ptr_weight = weight_data + p*kstride0 + k*kstride1; /* get output */ real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; /* get input */ real *ptr_input = input_data + p*istride0 + i*istride1; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = 
kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]*r_->size[1]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for(k = 0; k < nKernelPlane; k++) { int64_t i; /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 4D kernel, 3D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0, kstride1; THTensor *input; THTensor* kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous)(k_); } else { THTensor_(retain)(k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = 
input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { real* ptr_output = output_data + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for(k = 0; k < nOutputPlane; k++) { int64_t i; /* get output */ real *ptr_output = output_data + k*nOutputCols*nOutputRows; for(i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ real *ptr_input = input_data + i*istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows;*/ } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 4D kernel, 3D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t kstride0, kstride1; THTensor *input; THTensor* kernel; int64_t nbatch; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; int64_t p; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); if 
(!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous)(k_); } else { THTensor_(retain)(k_); kernel = k_; } nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmm : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { /*THTensor_(zero)(r_);*/ #pragma omp parallel for private(p) for (p=0; p < r_->size[0]; p++) { int64_t k; for (k = 0; k < r_->size[1]; k++) { real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] = 0.0; } } } else if (beta != 1) { /*THTensor_(mul)(r_, beta);*/ #pragma omp parallel for private(p) for(p=0; p < r_->size[0]; p++) { int64_t k; for (k = 0; k < r_->size[1]; k++) { real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; int64_t l; for (l = 0; l < nOutputRows*nOutputCols; l++) ptr_output[l] *= beta; } } } #pragma omp parallel for private(p) for(p=0; p < nbatch; p++) { int64_t k; for(k = 0; k < nOutputPlane; k++) { int64_t i; /* get output */ real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows; for(i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows;*/ } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 2D input, 2D kernel, 2D output scalar multiplication like y <- x*y + beta*y */ void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { THTensor *input; THTensor* kernel; int64_t nInputRows; int64_t nInputCols; int64_t nKernelRows; int64_t nKernelCols; int64_t nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 2 , 3,
"input: 2D Tensor expected"); THArgCheck(k_->nDimension == 2 , 4, "kernel: 2D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); nInputRows = input->size[0]; nInputCols = input->size[1]; nKernelRows = kernel->size[0]; nKernelCols = kernel->size[1]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize2d)(r_, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) THTensor_(zero)(r_); else if (beta != 1) THTensor_(mul)(r_, r_, beta); ptr_input = THTensor_(data)(input); ptr_weight = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); /* do image, kernel convolution */ THTensor_(conv2d)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output component wise multiplication like y <- y.*x + beta*y */ void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); for(k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + k*istride0; /* do image, kernel convolution */ THTensor_(conv2d)(output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output component wise 
multiplication like with a permutation map y <- y.*x + beta*y */ void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor* kernel; real *input_data; real *weight_data; real *output_data; int64_t nmaps; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); nmaps = map->size[0]; for(k = 0; k < nmaps; k++) { /* get indices */ int64_t from = (int64_t)THTensor_(get2d)(map,k,0)-1; int64_t to = (int64_t)THTensor_(get2d)(map,k,1)-1; /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + from*istride0; /* get output */ real *ptr_output = output_data + to*nOutputRows*nOutputCols; /* do image, kernel convolution */ THTensor_(conv2d)(ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A for sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 */ void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); input = THTensor_(newContiguous)(t_); kernel = 
THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth= kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv3DRevger : Input image is smaller than kernel"); nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth; nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement)(r_); THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); for(k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ THTensor_(validXCorr3DRevptr)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, 
nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); for(k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data+k*kstride0; for(i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data+i*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 5D kernel, 4D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 5 , 4, "kernel: 5D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) { kernel = THTensor_(newContiguous)(k_); } else { THTensor_(retain)(k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelDepth = kernel->size[2]; nKernelRows = kernel->size[3]; nKernelCols = kernel->size[4]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); for(k = 0; k < nOutputPlane; k++) { for(i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0 + i*kstride1; /* get input */ real *ptr_input = input_data + i*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, 
ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 3D input, 3D kernel, 3D output scalar multiplication like y <- x*y + beta*y */ void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { THTensor *input; THTensor* kernel; int64_t nInputDepth; int64_t nInputRows; int64_t nInputCols; int64_t nKernelDepth; int64_t nKernelRows; int64_t nKernelCols; int64_t nOutputDepth, nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); nInputDepth = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; nKernelDepth = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize3d)(r_, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) THTensor_(zero)(r_); else if (beta != 1) THTensor_(mul)(r_, r_, beta); ptr_input = THTensor_(data)(input); ptr_weight = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output component wise multiplication like y <- y.*x + beta*y */ void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 =
input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); for(k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + k*istride0; /* do image, kernel convolution */ THTensor_(conv3d)(output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth*nOutputCols*nOutputRows; } THTensor_(free)(input); THTensor_(free)(kernel); } /* 4D input, 4D kernel, 4D output component wise multiplication like with a permutation map y <- y.*x + beta*y */ void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; int64_t nmaps; int64_t k; THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous)(t_); kernel = THTensor_(newContiguous)(k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmap : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize)(nInputCols, 
nKernelCols, scol, vf); nelem = THTensor_(nElement)(r_); THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) { THTensor_(zero)(r_); } else if (beta != 1) THTensor_(mul)(r_, r_, beta); input_data = THTensor_(data)(input); weight_data = THTensor_(data)(kernel); output_data = THTensor_(data)(r_); nmaps = map->size[0]; for(k = 0; k < nmaps; k++) { /* get indices */ int64_t from = (int64_t)THTensor_(get2d)(map,k,0)-1; int64_t to = (int64_t)THTensor_(get2d)(map,k,1)-1; /* get kernel */ real *ptr_weight = weight_data + k*kstride0; /* get input */ real *ptr_input = input_data + from*istride0; /* get output */ real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols; /* do image, kernel convolution */ THTensor_(conv3d)(ptr_output, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } THTensor_(free)(input); THTensor_(free)(kernel); } #endif
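The 'V'/'F' flag used throughout these wrappers selects a valid versus full output extent, and THTensor_(convsize) reduces that choice to one formula per axis: (x - k) / s + 1 for valid, (x - 1) * s + k for full. As a quick illustration, here is a minimal standalone C sketch of the same arithmetic (convsize_demo and the sample sizes are illustrative, not part of the library):

#include <stdio.h>
#include <stdint.h>

/* Per-axis output extent: 'V' (valid) shrinks the input, 'F' (full) grows it. */
static int64_t convsize_demo(int64_t x, int64_t k, int64_t s, char vf)
{
    return (vf == 'V') ? (x - k) / s + 1 : (x - 1) * s + k;
}

int main(void)
{
    /* an 8-wide axis convolved with a 3-wide kernel */
    printf("valid, stride 1: %lld\n", (long long)convsize_demo(8, 3, 1, 'V')); /* 6  */
    printf("valid, stride 2: %lld\n", (long long)convsize_demo(8, 3, 2, 'V')); /* 3  */
    printf("full,  stride 1: %lld\n", (long long)convsize_demo(8, 3, 1, 'F')); /* 10 */
    return 0;
}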
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/THTensorConv.c" #else /* * 2D Input, 2D kernel : convolve given image with the given kernel. */ void THTensor_(validXCorr2Dptr) (real * r_, real alpha, real * t_, int64_t ir, int64_t ic, real * k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { /* regular convolution */ for (yy = 0; yy < or; yy++) { for (xx = 0; xx < oc; xx++) { /* * Dot product in two dimensions... (between input image and * the mask) */ real *pi_ = t_ + yy * sr * ic + xx * sc; real *pw_ = k_; real sum = 0; for (ky = 0; ky < kr; ky++) { for (kx = 0; kx < kc; kx++) { sum += pi_[kx] * pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } /* Update output */ *r_++ += alpha * sum; } } } else { /* SSE-based convolution */ for (yy = 0; yy < or; yy++) { real *pi_ = t_ + yy * sr * ic; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THVector_(cadd) (r_, r_, pis_, alpha * pw_[kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } r_ += oc; } } } /* * 2D Input, 2D kernel : convolve given image with the given kernel. */ void THTensor_(validConv2Dptr) (real * r_, real alpha, real * t_, int64_t ir, int64_t ic, real * k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t xx, yy, kx, ky; if ((sc != 1) || (oc < 4)) { /* regular convolution */ for (yy = 0; yy < or; yy++) { for (xx = 0; xx < oc; xx++) { /* * Dot product in two dimensions... (between input image and * the mask) */ real *pi_ = t_ + yy * sr * ic + xx * sc; real *pw_ = k_ + kr * kc - 1; real sum = 0; for (ky = 0; ky < kr; ky++) { for (kx = 0; kx < kc; kx++) { sum += pi_[kx] * pw_[-kx]; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } /* Update output */ *r_++ += alpha * sum; } } } else { /* SSE-based convolution */ for (yy = 0; yy < or; yy++) { real *pw_ = k_ + kr * kc - 1; real *pi_ = t_ + yy * sr * ic; for (ky = 0; ky < kr; ky++) { real *pis_ = pi_; for (kx = 0; kx < kc; kx++) { THVector_(cadd) (r_, r_, pis_, alpha * pw_[-kx], oc); pis_++; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } r_ += oc; } } } /* * 2D Input, 2D kernel : convolve given image with the given kernel, full * convolution. */ void THTensor_(fullConv2Dptr) (real * r_, real alpha, real * t_, int64_t ir, int64_t ic, real * k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t oc = (ic - 1) * sc + kc; int64_t xx, yy, kx, ky; if ((sc != 1) || (ic < 4)) { /* regular convolution */ for (yy = 0; yy < ir; yy++) { for (xx = 0; xx < ic; xx++) { /* * Outer product in two dimensions... (between input image * and the mask) */ real *po_ = r_ + yy * sr * oc + xx * sc; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for (kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[kx]; } po_ += oc; /* next input line */ pw_ += kc; /* next mask line */ } t_++; } } } else { /* SSE-based convolution */ for (yy = 0; yy < ir; yy++) { real *po_ = r_ + yy * sr * oc; real *pw_ = k_; for (ky = 0; ky < kr; ky++) { real *pos_ = po_; for (kx = 0; kx < kc; kx++) { THVector_(cadd) (pos_, pos_, t_, alpha * pw_[kx], ic); pos_++; } po_ += oc; /* next input line */ pw_ += kc; /* next mask line */ } t_ += ic; } } } /* * 2D Input, 2D kernel : convolve given image with the given kernel, full * convolution. 
*/ void THTensor_(fullXCorr2Dptr) (real * r_, real alpha, real * t_, int64_t ir, int64_t ic, real * k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t oc = (ic - 1) * sc + kc; int64_t xx, yy, kx, ky; if ((sc != 1) || (ic < 4)) { /* regular convolution */ for (yy = 0; yy < ir; yy++) { for (xx = 0; xx < ic; xx++) { /* * Outer product in two dimensions... (between input image * and the mask) */ real *po_ = r_ + yy * sr * oc + xx * sc; real *pw_ = k_ + kr * kc - 1; int64_t kx, ky; for (ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for (kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[-kx]; } po_ += oc; /* next input line */ pw_ -= kc; /* next mask line */ } t_++; } } } else { /* SSE-based convolution */ for (yy = 0; yy < ir; yy++) { real *po_ = r_ + yy * sr * oc; real *pw_ = k_ + kr * kc - 1; for (ky = 0; ky < kr; ky++) { real *pos_ = po_; for (kx = 0; kx < kc; kx++) { THVector_(cadd) (pos_, pos_, t_, pw_[-kx] * alpha, ic); pos_++; } po_ += oc; /* next input line */ pw_ -= kc; /* next mask line */ } t_ += ic; } } } /* * 2D Input, 2D kernel : convolve given image with the given kernel, valid * convolution. for sr,sc=1 this is equivalent to validXCorr2Dptr, but * otherwise it is useful for calculating derivatives wrt a kernel that is * applied with stride sr,sc != 1 */ void THTensor_(validXCorr2DRevptr) (real * r_, real alpha, real * t_, int64_t ir, int64_t ic, real * k_, int64_t kr, int64_t kc, int64_t sr, int64_t sc) { int64_t or = ir - (kr - 1) * sr; int64_t oc = ic - (kc - 1) * sc; int64_t xx, yy, kx, ky; if ((sc != 1) || (kc < 4)) { /* regular convolution */ for (yy = 0; yy < kr; yy++) { for (xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + yy * sr * ic + xx * sc; real z = *k_++ * alpha; for (ky = 0; ky < or; ky++) { for (kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } } } } else { /* SSE-based convolution */ for (yy = 0; yy < kr; yy++) { for (xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + yy * sr * ic + xx * sc; real z = *k_++ * alpha; for (ky = 0; ky < or; ky++) { THVector_(cadd) (po_, po_, pi_, z, oc); pi_ += ic; po_ += oc; } } } } } /* * 3D Input, 3D kernel : convolve given volume with the given kernel. */ void THTensor_(validXCorr3Dptr) (real * r_, real alpha, real * t_, int64_t it, int64_t ir, int64_t ic, real * k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = (it - kt) / st + 1; int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t zz, xx, yy; for (zz = 0; zz < ot; zz++) { for (yy = 0; yy < or; yy++) { for (xx = 0; xx < oc; xx++) { /* * Dot product in two dimensions... (between input image and * the mask) */ real *pi_ = t_ + zz * st * ir * ic + yy * sr * ic + xx * sc; real *pw_ = k_; real sum = 0; int64_t kz, kx, ky; for (kz = 0; kz < kt; kz++) { for (ky = 0; ky < kr; ky++) { for (kx = 0; kx < kc; kx++) { sum += pi_[kx] * pw_[kx]; } pi_ += ic; /* next input line */ pw_ += kc; /* next mask line */ } pi_ += (ir - kr) * ic; /* next input slice */ } /* Update output */ *r_++ += sum * alpha; } } } } /* * 3D Input, 3D kernel : convolve given volume with the given kernel. 
*/ void THTensor_(validConv3Dptr) (real * r_, real alpha, real * t_, int64_t it, int64_t ir, int64_t ic, real * k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = (it - kt) / st + 1; int64_t or = (ir - kr) / sr + 1; int64_t oc = (ic - kc) / sc + 1; int64_t zz, xx, yy; for (zz = 0; zz < ot; zz++) { for (yy = 0; yy < or; yy++) { for (xx = 0; xx < oc; xx++) { /* * Dot product in two dimensions... (between input image and * the mask) */ real *pi_ = t_ + zz * st * ir * ic + yy * sr * ic + xx * sc; real *pw_ = k_ + kt * kr * kc - 1; real sum = 0; int64_t kz, kx, ky; for (kz = 0; kz < kt; kz++) { for (ky = 0; ky < kr; ky++) { for (kx = 0; kx < kc; kx++) { sum += pi_[kx] * pw_[-kx]; } pi_ += ic; /* next input line */ pw_ -= kc; /* next mask line */ } pi_ += (ir - kr) * ic; /* next input slice */ } /* Update output */ *r_++ += alpha * sum; } } } } /* * 3D Input, 3D kernel : convolve given volume with the given kernel, full * convolution. */ void THTensor_(fullConv3Dptr) (real * r_, real alpha, real * t_, int64_t it, int64_t ir, int64_t ic, real * k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t or = (ir - 1) * sr + kr; int64_t oc = (ic - 1) * sc + kc; int64_t zz, xx, yy; for (zz = 0; zz < it; zz++) { for (yy = 0; yy < ir; yy++) { for (xx = 0; xx < ic; xx++) { /* * Outer product in two dimensions... (between input image * and the mask) */ real *po_ = r_ + zz * st * or * oc + yy * sr * oc + xx * sc; real *pw_ = k_; int64_t kz, kx, ky; /* * printf("Output Plane : %ld,%ld,%ld, input * val=%g\n",zz,yy,xx,*t_); */ for (kz = 0; kz < kt; kz++) { for (ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for (kx = 0; kx < kc; kx++) { /* printf("o=%g,k=%g," , po_[kx],pw_[kx]); */ po_[kx] += z * pw_[kx]; /* printf("o=%g " , po_[kx]); */ } /* printf("\n"); */ po_ += oc; /* next input line */ pw_ += kc; /* next mask line */ } po_ += (or - kr) * oc; /* next output slice */ /* printf("\n"); */ } t_++; } } } } /* * 3D Input, 3D kernel : convolve given volume with the given kernel, full * convolution. */ void THTensor_(fullXCorr3Dptr) (real * r_, real alpha, real * t_, int64_t it, int64_t ir, int64_t ic, real * k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t or = (ir - 1) * sr + kr; int64_t oc = (ic - 1) * sc + kc; int64_t zz, xx, yy; for (zz = 0; zz < it; zz++) { for (yy = 0; yy < ir; yy++) { for (xx = 0; xx < ic; xx++) { /* * Outer product in two dimensions... (between input image * and the mask) */ real *po_ = r_ + zz * st * or * oc + yy * sr * oc + xx * sc; real *pw_ = k_ + kt * kr * kc - 1; int64_t kz, kx, ky; for (kz = 0; kz < kt; kz++) { for (ky = 0; ky < kr; ky++) { real z = *t_ * alpha; for (kx = 0; kx < kc; kx++) { po_[kx] += z * pw_[-kx]; } po_ += oc; /* next input line */ pw_ -= kc; /* next mask line */ } po_ += (or - kr) * oc; /* next output slice */ } t_++; } } } } /* * 3D Input, 3D kernel : convolve given image with the given kernel, valid * convolution. 
for sr,sc=1 this is equivalent to validXCorr3Dptr, but * otherwise it is useful for calculating derivatives wrt a kernel that is * applied with stride sr,sc != 1 */ void THTensor_(validXCorr3DRevptr) (real * r_, real alpha, real * t_, int64_t it, int64_t ir, int64_t ic, real * k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = it - (kt - 1) * st; int64_t or = ir - (kr - 1) * sr; int64_t oc = ic - (kc - 1) * sc; int64_t zz, xx, yy; for (zz = 0; zz < kt; zz++) { for (yy = 0; yy < kr; yy++) { for (xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + zz * st * ir * ic + yy * sr * ic + xx * sc; real z = *k_++ * alpha; int64_t kz, kx, ky; for (kz = 0; kz < ot; kz++) { for (ky = 0; ky < or; ky++) { for (kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } pi_ += (ir - or) * ic; /* next input slice */ } } } } } void THTensor_(conv2d) (real * output_data, real alpha, real * ptr_input, int64_t nInputRows, int64_t nInputCols, real * ptr_weight, int64_t nKernelRows, int64_t nKernelCols, int64_t srow, int64_t scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } void THTensor_(conv3d) (real * output_data, real alpha, real * ptr_input, int64_t nInputDepth, int64_t nInputRows, int64_t nInputCols, real * ptr_weight, int64_t nKernelDepth, int64_t nKernelRows, int64_t nKernelCols, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(fullConv3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else if (*xc == 'X') THTensor_(validXCorr3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(validConv3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); } int64_t THTensor_(convsize) (int64_t x, int64_t k, int64_t s, const char *vf) { THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'"); if (*vf == 'V') return (x - k) / s + 1; else return (x - 1) * s + k; } /* * 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A for * sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for * calculating derivatives wrt a kernel that is applied with stride sr,sc != * 1 */ void 
THTensor_(conv2DRevger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols, 2, "conv2DRevger : Input image is smaller than kernel"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } for (k = 0; k < nKernelPlane; k++) { int64_t i; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k * nInputPlane * nOutputCols * nOutputRows + i * nOutputCols * nOutputRows; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 4D output like rank1 update A <- xx' + beta*A for * sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for * calculating derivatives wrt a kernel that is applied with stride sr,sc != * 1 */ void THTensor_(conv2DRevgerm) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol) { int64_t nbatch, nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputRows, nOutputCols; int64_t istride0, kstride0, istride1, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >=
1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; istride1 = input->stride[1]; nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelPlane = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols, 2, "conv2DRevger : Input image is smaller than kernel"); THArgCheck(kernel->size[0] == input->size[0], 2, "conv2DRevger : Input batch and kernel batch is not same size"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } for (k = 0; k < nKernelPlane; k++) { int64_t i; for (i = 0; i < nInputPlane; i++) { int64_t p; for (p = 0; p < nbatch; p++) { /* get kernel */ real *ptr_weight = weight_data + p * kstride0 + k * kstride1; /* get output */ real *ptr_output = output_data + k * nInputPlane * nOutputCols * nOutputRows + i * nOutputCols * nOutputRows; /* get input */ real *ptr_input = input_data + p * istride0 + i * istride1; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv2Dger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = 
nInputPlane * kernel->size[0]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } for (k = 0; k < nKernelPlane; k++) { int64_t i; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k * nInputPlane * nOutputCols * nOutputRows + i * nOutputCols * nOutputRows; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 4D kernel, 3D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmv) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous) (k_); } else { THTensor_(retain) (k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = 
kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ for (k = 0; k < r_->size[0]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ for (k = 0; k < r_->size[0]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } for (k = 0; k < nOutputPlane; k++) { int64_t i; /* get output */ real *ptr_output = output_data + k * nOutputCols * nOutputRows; for (i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0 + i * kstride1; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 4D output batched matrix vector products like y <- Ax + beta*y */ void THTensor_(conv2Dmm) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t kstride0, kstride1; THTensor *input; THTensor *kernel; int64_t nbatch; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; int64_t p; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous) (k_); } else { THTensor_(retain) (k_); kernel = k_; } nbatch =
input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmm : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nbatch, nOutputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ for (p = 0; p < r_->size[0]; p++) { int64_t k; for (k = 0; k < r_->size[1]; k++) { real *ptr_output = output_data + p * nOutputPlane * nOutputRows * nOutputCols + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ for (p = 0; p < r_->size[0]; p++) { int64_t k; for (k = 0; k < r_->size[1]; k++) { real *ptr_output = output_data + p * nOutputPlane * nOutputRows * nOutputCols + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } } for (p = 0; p < nbatch; p++) { int64_t k; for (k = 0; k < nOutputPlane; k++) { int64_t i; /* get output */ real *ptr_output = output_data + p * nOutputPlane * nOutputCols * nOutputRows + k * nOutputCols * nOutputRows; for (i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0 + i * kstride1; /* get input */ real *ptr_input = input_data + p * nInputPlane * nInputRows * nInputCols + i * nInputRows * nInputCols; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 2D input, 2D kernel, 2D output scalar multiplication like y <- x*y + * beta*y */ void THTensor_(conv2Dmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { THTensor *input; THTensor *kernel; int64_t nInputRows; int64_t nInputCols; int64_t nKernelRows; int64_t nKernelCols; int64_t nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 2, 3, "input: 2D Tensor expected"); THArgCheck(k_->nDimension == 2, 4, "kernel: 2D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6,
"Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputRows = input->size[0]; nInputCols = input->size[1]; nKernelRows = kernel->size[0]; nKernelCols = kernel->size[1]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize2d) (r_, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) THTensor_(zero) (r_); else if (beta != 1) THTensor_(mul) (r_, r_, beta); ptr_input = THTensor_(data) (input); ptr_weight = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); /* do image, kernel convolution */ THTensor_(conv2d) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 3D output component wise multiplication like y <- * y.*x + beta*y */ void THTensor_(conv2Dcmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + k * istride0; /* do image, kernel convolution */ THTensor_(conv2d) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputCols * nOutputRows; } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 3D output component wise multiplication like with a * permutation map y <- y.*x + beta*y */ void THTensor_(conv2Dmap) (THTensor * r_, real beta, real alpha, THTensor * t_, 
THTensor * k_, THTensor * map, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; int64_t nmaps; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(map->nDimension == 2, 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); nmaps = map->size[0]; for (k = 0; k < nmaps; k++) { /* get indices */ int64_t from = (int64_t) THTensor_(get2d) (map, k, 0) - 1; int64_t to = (int64_t) THTensor_(get2d) (map, k, 1) - 1; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + from * istride0; /* get output */ real *ptr_output = output_data + to * nOutputRows * nOutputCols; /* do image, kernel convolution */ THTensor_(conv2d) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A for * sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for * calculating derivatives wrt a kernel that is applied with stride sr,sc != * 1 */ void THTensor_(conv3DRevger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth 
= input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols, 2, "conv3DRevger : Input image is smaller than kernel"); nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth; nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement) (r_); THTensor_(resize5d) (r_, nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(validXCorr3DRevptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv3Dger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize5d) (r_, nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta 
== 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 5D kernel, 4D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv3Dmv) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 5, 4, "kernel: 5D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) { kernel = THTensor_(newContiguous) (k_); } else { THTensor_(retain) (k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelDepth = kernel->size[2]; nKernelRows = kernel->size[3]; nKernelCols = kernel->size[4]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nOutputPlane; k++) { for (i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0 + i * kstride1; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, 
nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 3D output scalar multiplication like y <- x*y + * beta*y */ void THTensor_(conv3Dmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { THTensor *input; THTensor *kernel; int64_t nInputDepth; int64_t nInputRows; int64_t nInputCols; int64_t nKernelDepth; int64_t nKernelRows; int64_t nKernelCols; int64_t nOutputDepth, nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputDepth = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; nKernelDepth = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) THTensor_(zero) (r_); else if (beta != 1) THTensor_(mul) (r_, r_, beta); ptr_input = THTensor_(data) (input); ptr_weight = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 4D output component wise multiplication like y <- * y.*x + beta*y */ void THTensor_(conv3Dcmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel =
THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + k * istride0; /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 4D output component wise multiplication like with a * permutation map y <- y.*x + beta*y */ void THTensor_(conv3Dmap) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, THTensor * map, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; int64_t nmaps; int64_t k; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(map->nDimension == 2, 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmap : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) 
(nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); nmaps = map->size[0]; for (k = 0; k < nmaps; k++) { /* get indices */ int64_t from = (int64_t) THTensor_(get2d) (map, k, 0) - 1; int64_t to = (int64_t) THTensor_(get2d) (map, k, 1) - 1; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + from * istride0; /* get output */ real *ptr_output = output_data + to * nOutputDepth * nOutputRows * nOutputCols; /* do image, kernel convolution */ THTensor_(conv3d) (ptr_output, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } THTensor_(free) (input); THTensor_(free) (kernel); } #endif
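Note that every *2Dptr/*3Dptr kernel in this file accumulates into the output (*r_ += alpha * sum) rather than overwriting it; that is why the conv*D wrappers pre-zero or pre-scale r_ by beta before dispatching. Below is a minimal, dependency-free sketch of the valid cross-correlation case with both strides fixed to 1 (valid_xcorr2d and its test values are illustrative, not library code):

#include <stdio.h>
#include <stdint.h>

/* Accumulates alpha * (valid 2D cross-correlation of t with k) into r.
   r must hold (ir-kr+1)*(ic-kc+1) pre-initialized values, since this
   adds on top of whatever is already there, just as the TH kernels do. */
static void valid_xcorr2d(float *r, float alpha,
                          const float *t, int64_t ir, int64_t ic,
                          const float *k, int64_t kr, int64_t kc)
{
    int64_t or_ = ir - kr + 1, oc = ic - kc + 1;
    for (int64_t yy = 0; yy < or_; yy++)
        for (int64_t xx = 0; xx < oc; xx++) {
            float sum = 0;
            for (int64_t ky = 0; ky < kr; ky++)
                for (int64_t kx = 0; kx < kc; kx++)
                    sum += t[(yy + ky) * ic + (xx + kx)] * k[ky * kc + kx];
            r[yy * oc + xx] += alpha * sum; /* accumulate, do not overwrite */
        }
}

int main(void)
{
    float img[9] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; /* 3x3 input */
    float ker[4] = { 1, 0, 0, 1 };                /* adds x[i][j] and x[i+1][j+1] */
    float out[4] = { 0 };
    valid_xcorr2d(out, 1.0f, img, 3, 3, ker, 2, 2);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 6 8 12 14 */
    return 0;
}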
for sr,sc=1 this is equivalent to validXCorr3Dptr, but * otherwise it is useful for calculating derivatives wrt a kernel that is * applied with stride sr,sc != 1 */ void THTensor_(validXCorr3DRevptr) (real * r_, real alpha, real * t_, int64_t it, int64_t ir, int64_t ic, real * k_, int64_t kt, int64_t kr, int64_t kc, int64_t st, int64_t sr, int64_t sc) { int64_t ot = it - (kt - 1) * st; int64_t or = ir - (kr - 1) * sr; int64_t oc = ic - (kc - 1) * sc; int64_t zz, xx, yy; for (zz = 0; zz < kt; zz++) { for (yy = 0; yy < kr; yy++) { for (xx = 0; xx < kc; xx++) { real *po_ = r_; real *pi_ = t_ + zz * st * ir * ic + yy * sr * ic + xx * sc; real z = *k_++ * alpha; int64_t kz, kx, ky; for (kz = 0; kz < ot; kz++) { for (ky = 0; ky < or; ky++) { for (kx = 0; kx < oc; kx++) po_[kx] += z * pi_[kx]; pi_ += ic; po_ += oc; } pi_ += (ir - or) * ic; /* next input slice */ } } } } } void THTensor_(conv2d) (real * output_data, real alpha, real * ptr_input, int64_t nInputRows, int64_t nInputCols, real * ptr_weight, int64_t nKernelRows, int64_t nKernelCols, int64_t srow, int64_t scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } void THTensor_(conv3d) (real * output_data, real alpha, real * ptr_input, int64_t nInputDepth, int64_t nInputRows, int64_t nInputCols, real * ptr_weight, int64_t nKernelDepth, int64_t nKernelRows, int64_t nKernelCols, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(fullConv3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else if (*xc == 'X') THTensor_(validXCorr3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); else THTensor_(validConv3Dptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); } int64_t THTensor_(convsize) (int64_t x, int64_t k, int64_t s, const char *vf) { THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'"); if (*vf == 'V') return (x - k) / s + 1; else return (x - 1) * s + k; } /* * 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A for * sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for * calculating derivatives wrt a kernel that is applied with stride sr,sc != * 1 */ void 
THTensor_(conv2DRevger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols, 2, "conv2DRevger : Input image is smaller than kernel"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for (k = 0; k < nKernelPlane; k++) { int64_t i; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k * nInputPlane * nOutputCols * nOutputRows + i * nOutputCols * nOutputRows; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A for * sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for * calculating derivatives wrt a kernel that is applied with stride sr,sc != * 1 */ void THTensor_(conv2DRevgerm) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol) { int64_t nbatch, nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputRows, nOutputCols; int64_t istride0, kstride0, istride1, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4,
"kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; istride1 = input->stride[1]; nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelPlane = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols, 2, "conv2DRevger : Input image is smaller than kernel"); THArgCheck(kernel->size[0] == input->size[0], 2, "conv2DRevger : Input batch and kernel batch is not same size"); nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for (k = 0; k < nKernelPlane; k++) { int64_t i; for (i = 0; i < nInputPlane; i++) { int64_t p; for (p = 0; p < nbatch; p++) { /* get kernel */ real *ptr_weight = weight_data + p * kstride0 + k * kstride1; /* get output */ real *ptr_output = output_data + k * nInputPlane * nOutputCols * nOutputRows + i * nOutputCols * nOutputRows; /* get input */ real *ptr_input = input_data + p * istride0 + i * istride1; /* do image, kernel convolution */ THTensor_(validXCorr2DRevptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 4D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv2Dger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelPlane, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; 
istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0] * r_->size[1]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for (k = 0; k < nKernelPlane; k++) { int64_t i; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get output */ real *ptr_output = output_data + k * nInputPlane * nOutputCols * nOutputRows + i * nOutputCols * nOutputRows; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 4D kernel, 3D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmv) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 
'C'"); input = THTensor_(newContiguous) (t_); if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous) (k_); } else { THTensor_(retain) (k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ #pragma omp parallel for private(k) for (k = 0; k < r_->size[0]; k++) { real *ptr_output = output_data + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } #pragma omp parallel for private(k) for (k = 0; k < nOutputPlane; k++) { int64_t i; /* get output */ real *ptr_output = output_data + k * nOutputCols * nOutputRows; for (i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0 + i * kstride1; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 4D kernel, 3D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv2Dmm) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t kstride0, kstride1; THTensor *input; THTensor *kernel; int64_t nbatch; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; int64_t p; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive 
integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { kernel = THTensor_(newContiguous) (k_); } else { THTensor_(retain) (k_); kernel = k_; } nbatch = input->size[0]; nInputPlane = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); if (*vf == 'F') { nOutputRows = (nInputRows - 1) * srow + nKernelRows; nOutputCols = (nInputCols - 1) * scol + nKernelCols; } else { /* valid */ nOutputRows = (nInputRows - nKernelRows) / srow + 1; nOutputCols = (nInputCols - nKernelCols) / scol + 1; } nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nbatch, nOutputPlane, nOutputRows, nOutputCols); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { /* THTensor_(zero)(r_); */ #pragma omp parallel for private(p) for (p = 0; p < r_->size[0]; p++) { int64_t k; for (k = 0; k < r_->size[1]; k++) { real *ptr_output = output_data + p * nOutputPlane * nOutputRows * nOutputCols + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] = 0.0; } } } else if (beta != 1) { /* THTensor_(mul)(r_, beta); */ #pragma omp parallel for private(p) for (p = 0; p < r_->size[0]; p++) { int64_t k; for (k = 0; k < r_->size[1]; k++) { real *ptr_output = output_data + p * nOutputPlane * nOutputRows * nOutputCols + k * nOutputCols * nOutputRows; int64_t l; for (l = 0; l < nOutputRows * nOutputCols; l++) ptr_output[l] *= beta; } } } #pragma omp parallel for private(p) for (p = 0; p < nbatch; p++) { int64_t k; for (k = 0; k < nOutputPlane; k++) { int64_t i; /* get output */ real *ptr_output = output_data + p * nOutputPlane * nOutputCols * nOutputRows + k * nOutputCols * nOutputRows; for (i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0 + i * kstride1; /* get input */ real *ptr_input = input_data + p * nInputPlane * nInputRows * nInputCols + i * nInputRows * nInputCols; /* do image, kernel convolution */ if (*vf == 'F') if (*xc == 'X') THTensor_(fullXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(fullConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else if (*xc == 'X') THTensor_(validXCorr2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); else THTensor_(validConv2Dptr) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol); } /* Next output plane */ /* output_data += nOutputCols*nOutputRows; */ } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 2D input, 2D kernel, 2D output scalar multiplication like y <- x*y + * beta*y */ void THTensor_(conv2Dmul) (THTensor * r_, real beta, real 
alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { THTensor *input; THTensor *kernel; int64_t nInputRows; int64_t nInputCols; int64_t nKernelRows; int64_t nKernelCols; int64_t nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 2, 3, "input: 2D Tensor expected"); THArgCheck(k_->nDimension == 2, 4, "kernel: 2D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputRows = input->size[0]; nInputCols = input->size[1]; nKernelRows = kernel->size[0]; nKernelCols = kernel->size[1]; THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize2d) (r_, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) THTensor_(zero) (r_); else if (beta != 1) THTensor_(mul) (r_, r_, beta); ptr_input = THTensor_(data) (input); ptr_weight = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); /* do image, kernel convolution */ THTensor_(conv2d) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 3D output component wise multiplication like y <- * y.*x + beta*y */ void THTensor_(conv2Dcmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* 
get input */ real *ptr_input = input_data + k * istride0; /* do image, kernel convolution */ THTensor_(conv2d) (output_data, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputCols * nOutputRows; } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 3D output component wise multiplication like with a * permutation map y <- y.*x + beta*y */ void THTensor_(conv2Dmap) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, THTensor * map, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputRows, nInputCols; int64_t nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; int64_t nmaps; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(map->nDimension == 2, 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel"); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputPlane, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); nmaps = map->size[0]; for (k = 0; k < nmaps; k++) { /* get indices */ int64_t from = (int64_t) THTensor_(get2d) (map, k, 0) - 1; int64_t to = (int64_t) THTensor_(get2d) (map, k, 1) - 1; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + from * istride0; /* get output */ real *ptr_output = output_data + to * nOutputRows * nOutputCols; /* do image, kernel convolution */ THTensor_(conv2d) (ptr_output, alpha, ptr_input, nInputRows, nInputCols, ptr_weight, nKernelRows, nKernelCols, srow, scol, vf, xc); } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A for * sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for * calculating derivatives wrt a kernel that is applied with stride sr,sc != * 1 */ void THTensor_(conv3DRevger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; 
real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * kernel->size[0]; THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols, 2, "conv3DRevger : Input image is smaller than kernel"); nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth; nOutputRows = nInputRows - (nKernelRows - 1) * srow; nOutputCols = nInputCols - (nKernelCols - 1) * scol; nelem = THTensor_(nElement) (r_); THTensor_(resize5d) (r_, nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(validXCorr3DRevptr) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol); /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 5D output like rank1 update A <- xx' + beta*A */ void THTensor_(conv3Dger) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nKernelPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; nOutputPlane = nInputPlane * 
kernel->size[0]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize5d) (r_, nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nKernelPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; for (i = 0; i < nInputPlane; i++) { /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 5D kernel, 4D output matrix vector product like y <- Ax + beta*y */ void THTensor_(conv3Dmv) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0, kstride1; THTensor *input; THTensor *kernel; real *input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k, i; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 5, 4, "kernel: 5D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) { kernel = THTensor_(newContiguous) (k_); } else { THTensor_(retain) (k_); kernel = k_; } nInputPlane = input->size[0]; istride0 = input->stride[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; kstride1 = kernel->stride[1]; nKernelDepth = kernel->size[2]; nKernelRows = kernel->size[3]; nKernelCols = kernel->size[4]; nOutputPlane = kernel->size[0]; THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != 
THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nOutputPlane; k++) { for (i = 0; i < nInputPlane; i++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0 + i * kstride1; /* get input */ real *ptr_input = input_data + i * istride0; /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 3D input, 3D kernel, 3D output scalar multiplication like y <- x*y + * beta*y */ void THTensor_(conv3Dmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { THTensor *input; THTensor *kernel; int64_t nInputDepth; int64_t nInputRows; int64_t nInputCols; int64_t nKernelDepth; int64_t nKernelRows; int64_t nKernelCols; int64_t nOutputDepth, nOutputRows, nOutputCols; real *ptr_input; real *ptr_weight; real *output_data; ptrdiff_t nelem; THArgCheck(t_->nDimension == 3, 3, "input: 3D Tensor expected"); THArgCheck(k_->nDimension == 3, 4, "kernel: 3D Tensor expected"); THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); nInputDepth = input->size[0]; nInputRows = input->size[1]; nInputCols = input->size[2]; nKernelDepth = kernel->size[0]; nKernelRows = kernel->size[1]; nKernelCols = kernel->size[2]; THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize3d) (r_, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) THTensor_(zero) (r_); else if (beta != 1) THTensor_(mul) (r_, r_, beta); ptr_input = THTensor_(data) (input); ptr_weight = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 4D output component wise multiplication like y <- * y.*x + beta*y */ void THTensor_(conv3Dcmul) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; real 
*input_data; real *weight_data; real *output_data; ptrdiff_t nelem; int64_t k; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols = input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); for (k = 0; k < nOutputPlane; k++) { /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + k * istride0; /* do image, kernel convolution */ THTensor_(conv3d) (output_data, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); /* Next output plane */ output_data += nOutputDepth * nOutputCols * nOutputRows; } THTensor_(free) (input); THTensor_(free) (kernel); } /* * 4D input, 4D kernel, 4D output component wise multiplication like with a * permutation map y <- y.*x + beta*y */ void THTensor_(conv3Dmap) (THTensor * r_, real beta, real alpha, THTensor * t_, THTensor * k_, THTensor * map, int64_t sdepth, int64_t srow, int64_t scol, const char *vf, const char *xc) { int64_t nInputPlane, nInputDepth, nInputRows, nInputCols; int64_t nKernelDepth, nKernelRows, nKernelCols; int64_t nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; int64_t istride0, kstride0; THTensor *input; THTensor *kernel; ptrdiff_t nelem; real *input_data; real *weight_data; real *output_data; int64_t nmaps; int64_t k; THArgCheck(t_->nDimension == 4, 3, "input: 4D Tensor expected"); THArgCheck(k_->nDimension == 4, 4, "kernel: 4D Tensor expected"); THArgCheck(map->nDimension == 2, 4, "map: 2D Tensor expected"); THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can be 'V' or 'F'"); THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can be 'X' or 'C'"); input = THTensor_(newContiguous) (t_); kernel = THTensor_(newContiguous) (k_); istride0 = input->stride[0]; nInputPlane = input->size[0]; nInputDepth = input->size[1]; nInputRows = input->size[2]; nInputCols =
input->size[3]; kstride0 = kernel->stride[0]; nOutputPlane = kernel->size[0]; nKernelDepth = kernel->size[1]; nKernelRows = kernel->size[2]; nKernelCols = kernel->size[3]; THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmap : Input image is smaller than kernel"); nOutputDepth = THTensor_(convsize) (nInputDepth, nKernelDepth, sdepth, vf); nOutputRows = THTensor_(convsize) (nInputRows, nKernelRows, srow, vf); nOutputCols = THTensor_(convsize) (nInputCols, nKernelCols, scol, vf); nelem = THTensor_(nElement) (r_); THTensor_(resize4d) (r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement) (r_)) { THTensor_(zero) (r_); } else if (beta != 1) THTensor_(mul) (r_, r_, beta); input_data = THTensor_(data) (input); weight_data = THTensor_(data) (kernel); output_data = THTensor_(data) (r_); nmaps = map->size[0]; for (k = 0; k < nmaps; k++) { /* get indices */ int64_t from = (int64_t) THTensor_(get2d) (map, k, 0) - 1; int64_t to = (int64_t) THTensor_(get2d) (map, k, 1) - 1; /* get kernel */ real *ptr_weight = weight_data + k * kstride0; /* get input */ real *ptr_input = input_data + from * istride0; /* get output */ real *ptr_output = output_data + to * nOutputDepth * nOutputRows * nOutputCols; /* do image, kernel convolution */ THTensor_(conv3d) (ptr_output, alpha, ptr_input, nInputDepth, nInputRows, nInputCols, ptr_weight, nKernelDepth, nKernelRows, nKernelCols, sdepth, srow, scol, vf, xc); } THTensor_(free) (input); THTensor_(free) (kernel); } #endif
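THTensor_(convsize) above encodes the standard output-size arithmetic: 'V' (valid) gives (x - k) / s + 1 and 'F' (full) gives (x - 1) * s + k. A self-contained restatement with a worked example (illustrative only, not the TH macro-expanded code):

#include <cstdint>
#include <cstdio>

/* Standalone restatement of the convsize rule, for illustration. */
static int64_t convsize(int64_t x, int64_t k, int64_t s, char vf) {
    return (vf == 'V') ? (x - k) / s + 1 : (x - 1) * s + k;
}

int main() {
    /* A 5x5 input with a 3x3 kernel at stride 1: */
    printf("valid: %lld\n", (long long)convsize(5, 3, 1, 'V')); /* prints 3 */
    printf("full: %lld\n", (long long)convsize(5, 3, 1, 'F'));  /* prints 7 */
    return 0;
}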
threading.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifndef LIGHTGBM_UTILS_THREADING_H_ #define LIGHTGBM_UTILS_THREADING_H_ #include <LightGBM/meta.h> #include <LightGBM/utils/common.h> #include <LightGBM/utils/openmp_wrapper.h> #include <algorithm> #include <functional> #include <vector> namespace LightGBM { class Threading { public: template <typename INDEX_T> static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfo<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template <typename INDEX_T> static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { *out_nblock = std::min<int>( num_threads, static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template <typename INDEX_T> static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, INDEX_T max_cnt_per_block, int* out_nblock, INDEX_T* block_size) { CHECK(max_cnt_per_block >= min_cnt_per_block); *out_nblock = std::min<int>( num_threads, static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); *out_nblock = std::max<int>( *out_nblock, static_cast<int>((cnt + max_cnt_per_block - 1) / max_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template <typename INDEX_T> static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { *out_nblock = std::min<int>( num_threads, static_cast<int>((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = (cnt + (*out_nblock) - 1) / (*out_nblock); // force the block size to the times of min_cnt_per_block *block_size = (*block_size + min_cnt_per_block - 1) / min_cnt_per_block * min_cnt_per_block; } else { *block_size = cnt; } } template <typename INDEX_T> static inline void BlockInfoForceSize(INDEX_T cnt, INDEX_T min_cnt_per_block, int* out_nblock, INDEX_T* block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfoForceSize<INDEX_T>(num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template <typename INDEX_T> static inline int For( INDEX_T start, INDEX_T end, INDEX_T min_block_size, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) { int n_block = 1; INDEX_T num_inner = end - start; BlockInfo<INDEX_T>(num_inner, min_block_size, &n_block, &num_inner); OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < n_block; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T inner_start = start + num_inner * i; INDEX_T inner_end = std::min(end, inner_start + num_inner); inner_fun(i, inner_start, inner_end); OMP_LOOP_EX_END(); } OMP_THROW_EX(); return n_block; } }; template <typename INDEX_T, bool TWO_BUFFER> class ParallelPartitionRunner { public: ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size) : min_block_size_(min_block_size) { num_threads_ = OMP_NUM_THREADS(); left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } offsets_.resize(num_threads_); left_cnts_.resize(num_threads_); right_cnts_.resize(num_threads_); left_write_pos_.resize(num_threads_); 
right_write_pos_.resize(num_threads_); } ~ParallelPartitionRunner() {} void ReSize(INDEX_T num_data) { left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } } template<bool FORCE_SIZE> INDEX_T Run( INDEX_T cnt, const std::function<INDEX_T(int, INDEX_T, INDEX_T, INDEX_T*, INDEX_T*)>& func, INDEX_T* out) { int nblock = 1; INDEX_T inner_size = cnt; if (FORCE_SIZE) { Threading::BlockInfoForceSize<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock, &inner_size); } else { Threading::BlockInfo<INDEX_T>(num_threads_, cnt, min_block_size_, &nblock, &inner_size); } OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) num_threads(num_threads_) for (int i = 0; i < nblock; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T cur_start = i * inner_size; INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start); offsets_[i] = cur_start; if (cur_cnt <= 0) { left_cnts_[i] = 0; right_cnts_[i] = 0; continue; } auto left_ptr = left_.data() + cur_start; INDEX_T* right_ptr = nullptr; if (TWO_BUFFER) { right_ptr = right_.data() + cur_start; } // split data inner, reduce the times of function called INDEX_T cur_left_count = func(i, cur_start, cur_cnt, left_ptr, right_ptr); if (!TWO_BUFFER) { // reverse for one buffer std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt); } left_cnts_[i] = cur_left_count; right_cnts_[i] = cur_cnt - cur_left_count; OMP_LOOP_EX_END(); } OMP_THROW_EX(); left_write_pos_[0] = 0; right_write_pos_[0] = 0; for (int i = 1; i < nblock; ++i) { left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1]; right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1]; } data_size_t left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1]; auto right_start = out + left_cnt; #pragma omp parallel for schedule(static, 1) num_threads(num_threads_) for (int i = 0; i < nblock; ++i) { std::copy_n(left_.data() + offsets_[i], left_cnts_[i], out + left_write_pos_[i]); if (TWO_BUFFER) { std::copy_n(right_.data() + offsets_[i], right_cnts_[i], right_start + right_write_pos_[i]); } else { std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i], right_start + right_write_pos_[i]); } } return left_cnt; } private: int num_threads_; INDEX_T min_block_size_; std::vector<INDEX_T> left_; std::vector<INDEX_T> right_; std::vector<INDEX_T> offsets_; std::vector<INDEX_T> left_cnts_; std::vector<INDEX_T> right_cnts_; std::vector<INDEX_T> left_write_pos_; std::vector<INDEX_T> right_write_pos_; }; } // namespace LightGBM #endif // LightGBM_UTILS_THREADING_H_
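BlockInfo above caps the block count at both the thread count and ceil(cnt / min_cnt_per_block), then sizes each block as ceil(cnt / nblock). A standalone sketch of that arithmetic (the SIZE_ALIGNED rounding from LightGBM's common.h is deliberately omitted here):

#include <algorithm>
#include <cstdio>

/* Sketch of the BlockInfo partitioning arithmetic, without SIZE_ALIGNED. */
static void block_info(int num_threads, int cnt, int min_cnt_per_block,
                       int* out_nblock, int* block_size) {
    *out_nblock = std::min(num_threads,
                           (cnt + min_cnt_per_block - 1) / min_cnt_per_block);
    *block_size = (*out_nblock > 1) ? (cnt + *out_nblock - 1) / *out_nblock : cnt;
}

int main() {
    int nblock = 0, bsize = 0;
    block_info(/*num_threads=*/16, /*cnt=*/1000, /*min_cnt_per_block=*/128,
               &nblock, &bsize);
    printf("nblock=%d block_size=%d\n", nblock, bsize); /* nblock=8 block_size=125 */
    return 0;
}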
#ifndef LIGHTGBM_UTILS_THREADING_H_ #define LIGHTGBM_UTILS_THREADING_H_ #include <LightGBM/meta.h> #include <LightGBM/utils/common.h> #include <LightGBM/utils/openmp_wrapper.h> #include <algorithm> #include <functional> #include <vector> namespace LightGBM { class Threading { public: template < typename INDEX_T > static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfo < INDEX_T > (num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template < typename INDEX_T > static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { *out_nblock = std::min < int >( num_threads, static_cast < int >((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template < typename INDEX_T > static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, INDEX_T max_cnt_per_block, int *out_nblock, INDEX_T * block_size) { CHECK(max_cnt_per_block >= min_cnt_per_block); *out_nblock = std::min < int >( num_threads, static_cast < int >((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); *out_nblock = std::max < int >( *out_nblock, static_cast < int >((cnt + max_cnt_per_block - 1) / max_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template < typename INDEX_T > static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { *out_nblock = std::min < int >( num_threads, static_cast < int >((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = (cnt + (*out_nblock) - 1) / (*out_nblock); // force the block size to the times of min_cnt_per_block *block_size = (*block_size + min_cnt_per_block - 1) / min_cnt_per_block * min_cnt_per_block; } else { *block_size = cnt; } } template < typename INDEX_T > static inline void BlockInfoForceSize(INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfoForceSize < INDEX_T > (num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template < typename INDEX_T > static inline int For( INDEX_T start, INDEX_T end, INDEX_T min_block_size, const std::function < void (int, INDEX_T, INDEX_T)>&inner_fun) { int n_block = 1; INDEX_T num_inner = end - start; BlockInfo < INDEX_T > (num_inner, min_block_size, &n_block, &num_inner); OMP_INIT_EX(); for (int i = 0; i < n_block; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T inner_start = start + num_inner * i; INDEX_T inner_end = std::min(end, inner_start + num_inner); inner_fun(i, inner_start, inner_end); OMP_LOOP_EX_END(); } OMP_THROW_EX(); return n_block; } }; template < typename INDEX_T, bool TWO_BUFFER > class ParallelPartitionRunner { public: ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size) : min_block_size_(min_block_size) { num_threads_ = OMP_NUM_THREADS(); left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } offsets_.resize(num_threads_); left_cnts_.resize(num_threads_); right_cnts_.resize(num_threads_); left_write_pos_.resize(num_threads_); right_write_pos_.resize(num_threads_); } ~ParallelPartitionRunner() { } void ReSize(INDEX_T num_data) { left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } } template <
bool FORCE_SIZE > INDEX_T Run( INDEX_T cnt, const std::function < INDEX_T(int, INDEX_T, INDEX_T, INDEX_T *, INDEX_T *)>&func, INDEX_T * out) { int nblock = 1; INDEX_T inner_size = cnt; if (FORCE_SIZE) { Threading::BlockInfoForceSize < INDEX_T > (num_threads_, cnt, min_block_size_, &nblock, &inner_size); } else { Threading::BlockInfo < INDEX_T > (num_threads_, cnt, min_block_size_, &nblock, &inner_size); } OMP_INIT_EX(); for (int i = 0; i < nblock; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T cur_start = i * inner_size; INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start); offsets_[i] = cur_start; if (cur_cnt <= 0) { left_cnts_[i] = 0; right_cnts_[i] = 0; continue; } auto left_ptr = left_.data() + cur_start; INDEX_T *right_ptr = nullptr; if (TWO_BUFFER) { right_ptr = right_.data() + cur_start; } //split data within each block, reducing the number of calls to func INDEX_T cur_left_count = func(i, cur_start, cur_cnt, left_ptr, right_ptr); if (!TWO_BUFFER) { //reverse for one buffer std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt); } left_cnts_[i] = cur_left_count; right_cnts_[i] = cur_cnt - cur_left_count; OMP_LOOP_EX_END(); } OMP_THROW_EX(); left_write_pos_[0] = 0; right_write_pos_[0] = 0; for (int i = 1; i < nblock; ++i) { left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1]; right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1]; } data_size_t left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1]; auto right_start = out + left_cnt; for (int i = 0; i < nblock; ++i) { std::copy_n(left_.data() + offsets_[i], left_cnts_[i], out + left_write_pos_[i]); if (TWO_BUFFER) { std::copy_n(right_.data() + offsets_[i], right_cnts_[i], right_start + right_write_pos_[i]); } else { std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i], right_start + right_write_pos_[i]); } } return left_cnt; } private: int num_threads_; INDEX_T min_block_size_; std::vector < INDEX_T > left_; std::vector < INDEX_T > right_; std::vector < INDEX_T > offsets_; std::vector < INDEX_T > left_cnts_; std::vector < INDEX_T > right_cnts_; std::vector < INDEX_T > left_write_pos_; std::vector < INDEX_T > right_write_pos_; }; } //namespace LightGBM #endif /* LIGHTGBM_UTILS_THREADING_H_ */
#ifndef LIGHTGBM_UTILS_THREADING_H_ #define LIGHTGBM_UTILS_THREADING_H_ #include <LightGBM/meta.h> #include <LightGBM/utils/common.h> #include <LightGBM/utils/openmp_wrapper.h> #include <algorithm> #include <functional> #include <vector> namespace LightGBM { class Threading { public: template < typename INDEX_T > static inline void BlockInfo(INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfo < INDEX_T > (num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template < typename INDEX_T > static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { *out_nblock = std::min < int >( num_threads, static_cast < int >((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template < typename INDEX_T > static inline void BlockInfo(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, INDEX_T max_cnt_per_block, int *out_nblock, INDEX_T * block_size) { CHECK(max_cnt_per_block >= min_cnt_per_block); *out_nblock = std::min < int >( num_threads, static_cast < int >((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); *out_nblock = std::max < int >( *out_nblock, static_cast < int >((cnt + max_cnt_per_block - 1) / max_cnt_per_block)); if (*out_nblock > 1) { *block_size = SIZE_ALIGNED((cnt + (*out_nblock) - 1) / (*out_nblock)); } else { *block_size = cnt; } } template < typename INDEX_T > static inline void BlockInfoForceSize(int num_threads, INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { *out_nblock = std::min < int >( num_threads, static_cast < int >((cnt + min_cnt_per_block - 1) / min_cnt_per_block)); if (*out_nblock > 1) { *block_size = (cnt + (*out_nblock) - 1) / (*out_nblock); //force the block size to a multiple of min_cnt_per_block *block_size = (*block_size + min_cnt_per_block - 1) / min_cnt_per_block * min_cnt_per_block; } else { *block_size = cnt; } } template < typename INDEX_T > static inline void BlockInfoForceSize(INDEX_T cnt, INDEX_T min_cnt_per_block, int *out_nblock, INDEX_T * block_size) { int num_threads = OMP_NUM_THREADS(); BlockInfoForceSize < INDEX_T > (num_threads, cnt, min_cnt_per_block, out_nblock, block_size); } template < typename INDEX_T > static inline int For( INDEX_T start, INDEX_T end, INDEX_T min_block_size, const std::function < void (int, INDEX_T, INDEX_T)>&inner_fun) { int n_block = 1; INDEX_T num_inner = end - start; BlockInfo < INDEX_T > (num_inner, min_block_size, &n_block, &num_inner); OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < n_block; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T inner_start = start + num_inner * i; INDEX_T inner_end = std::min(end, inner_start + num_inner); inner_fun(i, inner_start, inner_end); OMP_LOOP_EX_END(); } OMP_THROW_EX(); return n_block; } }; template < typename INDEX_T, bool TWO_BUFFER > class ParallelPartitionRunner { public: ParallelPartitionRunner(INDEX_T num_data, INDEX_T min_block_size) : min_block_size_(min_block_size) { num_threads_ = OMP_NUM_THREADS(); left_.resize(num_data); if (TWO_BUFFER) { right_.resize(num_data); } offsets_.resize(num_threads_); left_cnts_.resize(num_threads_); right_cnts_.resize(num_threads_); left_write_pos_.resize(num_threads_); right_write_pos_.resize(num_threads_); } ~ParallelPartitionRunner() { } void ReSize(INDEX_T num_data) { left_.resize(num_data); if
(TWO_BUFFER) { right_.resize(num_data); } } template < bool FORCE_SIZE > INDEX_T Run( INDEX_T cnt, const std::function < INDEX_T(int, INDEX_T, INDEX_T, INDEX_T *, INDEX_T *)>&func, INDEX_T * out) { int nblock = 1; INDEX_T inner_size = cnt; if (FORCE_SIZE) { Threading::BlockInfoForceSize < INDEX_T > (num_threads_, cnt, min_block_size_, &nblock, &inner_size); } else { Threading::BlockInfo < INDEX_T > (num_threads_, cnt, min_block_size_, &nblock, &inner_size); } OMP_INIT_EX(); #pragma omp parallel for schedule(static, 1) num_threads(num_threads_) for (int i = 0; i < nblock; ++i) { OMP_LOOP_EX_BEGIN(); INDEX_T cur_start = i * inner_size; INDEX_T cur_cnt = std::min(inner_size, cnt - cur_start); offsets_[i] = cur_start; if (cur_cnt <= 0) { left_cnts_[i] = 0; right_cnts_[i] = 0; continue; } auto left_ptr = left_.data() + cur_start; INDEX_T *right_ptr = nullptr; if (TWO_BUFFER) { right_ptr = right_.data() + cur_start; } //split data within each block, reducing the number of calls to func INDEX_T cur_left_count = func(i, cur_start, cur_cnt, left_ptr, right_ptr); if (!TWO_BUFFER) { //reverse for one buffer std::reverse(left_ptr + cur_left_count, left_ptr + cur_cnt); } left_cnts_[i] = cur_left_count; right_cnts_[i] = cur_cnt - cur_left_count; OMP_LOOP_EX_END(); } OMP_THROW_EX(); left_write_pos_[0] = 0; right_write_pos_[0] = 0; for (int i = 1; i < nblock; ++i) { left_write_pos_[i] = left_write_pos_[i - 1] + left_cnts_[i - 1]; right_write_pos_[i] = right_write_pos_[i - 1] + right_cnts_[i - 1]; } data_size_t left_cnt = left_write_pos_[nblock - 1] + left_cnts_[nblock - 1]; auto right_start = out + left_cnt; #pragma omp parallel for schedule(static, 1) num_threads(num_threads_) for (int i = 0; i < nblock; ++i) { std::copy_n(left_.data() + offsets_[i], left_cnts_[i], out + left_write_pos_[i]); if (TWO_BUFFER) { std::copy_n(right_.data() + offsets_[i], right_cnts_[i], right_start + right_write_pos_[i]); } else { std::copy_n(left_.data() + offsets_[i] + left_cnts_[i], right_cnts_[i], right_start + right_write_pos_[i]); } } return left_cnt; } private: int num_threads_; INDEX_T min_block_size_; std::vector < INDEX_T > left_; std::vector < INDEX_T > right_; std::vector < INDEX_T > offsets_; std::vector < INDEX_T > left_cnts_; std::vector < INDEX_T > right_cnts_; std::vector < INDEX_T > left_write_pos_; std::vector < INDEX_T > right_write_pos_; }; } //namespace LightGBM #endif /* LIGHTGBM_UTILS_THREADING_H_ */
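The two formatted variants above differ only in the #pragma omp parallel for schedule(static, 1) lines that distribute the per-block loops. The block-partitioning idea itself can be shown standalone. The sketch below is a simplified reimplementation, not LightGBM's actual API: it omits the SIZE_ALIGNED rounding and the OMP_*_EX exception-capture macros, and all names (parallel_blocks, the demo main) are invented for illustration.

#include <omp.h>
#include <algorithm>
#include <cstdio>
#include <functional>

// Split [start, end) into at most num_threads blocks of at least
// min_block_size elements, then run inner_fun once per block in parallel.
template <typename INDEX_T>
int parallel_blocks(INDEX_T start, INDEX_T end, INDEX_T min_block_size,
                    const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
  INDEX_T cnt = end - start;
  if (cnt <= 0) return 0;
  int nblock = std::min<int>(
      omp_get_max_threads(),
      static_cast<int>((cnt + min_block_size - 1) / min_block_size));
  INDEX_T block_size = (cnt + nblock - 1) / nblock;  // ceiling division
#pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < nblock; ++i) {
    INDEX_T inner_start = start + block_size * i;
    INDEX_T inner_end = std::min(end, inner_start + block_size);
    if (inner_start < inner_end) inner_fun(i, inner_start, inner_end);
  }
  return nblock;
}

int main() {
  // Each block reports the sub-range it was handed.
  parallel_blocks<int>(0, 1000, 128, [](int block, int lo, int hi) {
    std::printf("block %d covers [%d, %d)\n", block, lo, hi);
  });
  return 0;
}

schedule(static, 1) hands block i to thread i % num_threads, which matches the header's assumption that offsets_[i], left_cnts_[i], and so on are written by exactly one iteration each.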
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(32*t3+Nx+28,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ 
(-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients Adapted from PLUTO * and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 2; Ny = atoi(argv[2]) + 2; Nz = atoi(argv[3]) + 2; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 7); for (m = 0; m < 7; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 7; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1 = -1; t1 <= floord(Nt - 2, 16); t1++) { lbp = max(ceild(t1, 2), ceild(32 * t1 - Nt + 3, 32)); ubp = min(floord(Nt + Nz - 4, 32), floord(16 * t1 + Nz + 13, 32)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(0, ceild(t1 - 1, 2)), ceild(32 * t2 - Nz - 28, 32)); t3 <= min(min(min(floord(Nt + Ny - 4, 32), floord(16 * t1 + Ny + 29, 32)), floord(32 * t2 + Ny + 28, 32)), floord(32 * t1 - 32 * t2 + Nz + Ny + 27, 32)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 127, 128)), ceild(32 * t2 - Nz - 2044, 2048)), ceild(32 * t3 - Ny - 2044, 2048)); t4 <= min(min(min(min(floord(Nt + Nx - 4, 2048), floord(16 * t1 + Nx + 29, 2048)), floord(32 * t2 + Nx + 28, 2048)), floord(32 * t3 + Nx + 28, 2048)), floord(32 * t1 - 32 * t2 + Nz + Nx + 27, 2048)); t4++) { for (t5 = max(max(max(max(max(0, 16 * t1), 32 * t1 - 32 * t2 + 1), 32 * t2 - Nz + 2), 32 * t3 - Ny + 2), 2048 * t4 - Nx + 2); t5 <= min(min(min(min(min(Nt - 2, 16 * t1 + 31), 32 * t2 + 30), 32 * t3 + 30), 2048 * t4 + 2046), 32 * t1 - 32 * t2 + Nz + 29); t5++) { for (t6 = max(max(32 * t2, t5 + 1), -32 * t1 + 32 * t2 + 2 * t5 - 31); t6 <= min(min(32 * t2 + 31, -32 * t1 + 32 * t2 + 2 * t5), t5 + Nz - 2); t6++) { for (t7 = max(32 * t3, t5 + 1); t7 <= min(32 * t3 + 31, t5 + Ny - 2); t7++) { lbv = max(2048 * t4, t5 + 1); ubv = min(2048 * t4 + 2047, t5 + Nx - 2); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] = (((((((coef[0][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)]) + (coef[1][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6) - 1][(-t5 + t7)][(-t5 + t8)])) + (coef[2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7) - 1][(-t5 + t8)])) + 
(coef[3][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8) - 1])) + (coef[4][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6) + 1][(-t5 + t7)][(-t5 + t8)])) + (coef[5][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7) + 1][(-t5 + t8)])) + (coef[6][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 7; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients Adapted from PLUTO * and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 2; Ny = atoi(argv[2]) + 2; Nz = atoi(argv[3]) + 2; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 7); for (m = 0; m < 7; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 7; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1 = -1; t1 <= floord(Nt - 2, 16); t1++) { lbp = max(ceild(t1, 2), ceild(32 * t1 - Nt + 3, 32)); ubp = min(floord(Nt + Nz - 4, 32), floord(16 * t1 + Nz + 13, 32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(0, ceild(t1 - 1, 2)), ceild(32 * t2 - Nz - 28, 32)); t3 <= min(min(min(floord(Nt + Ny - 4, 32), floord(16 * t1 + Ny + 29, 32)), floord(32 * t2 + Ny + 28, 32)), floord(32 * t1 - 32 * t2 + Nz + Ny + 27, 32)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 127, 128)), ceild(32 * t2 - Nz - 2044, 2048)), ceild(32 * t3 - Ny - 2044, 2048)); t4 <= min(min(min(min(floord(Nt + Nx - 4, 2048), floord(16 * t1 + Nx + 29, 2048)), floord(32 * t2 + Nx + 28, 2048)), floord(32 * t3 + Nx + 28, 2048)), floord(32 * t1 - 32 * t2 + Nz + Nx + 27, 2048)); t4++) { for (t5 = max(max(max(max(max(0, 16 * t1), 32 * t1 - 32 * t2 + 1), 32 * t2 - Nz + 2), 32 * t3 - Ny + 2), 2048 * t4 - Nx + 2); t5 <= min(min(min(min(min(Nt - 2, 16 * t1 + 31), 32 * t2 + 30), 32 * t3 + 30), 2048 * t4 + 2046), 32 * t1 - 32 * t2 + Nz + 29); t5++) { for (t6 = max(max(32 * t2, t5 + 1), -32 * t1 + 32 * t2 + 2 * t5 - 31); t6 <= min(min(32 * t2 + 31, -32 * t1 + 32 * t2 + 2 * t5), t5 + Nz - 2); t6++) { for (t7 = max(32 * t3, t5 + 1); t7 <= min(32 * t3 + 31, t5 + Ny - 2); t7++) { lbv = max(2048 * t4, t5 + 1); ubv = min(2048 * t4 + 2047, t5 + Nx - 2); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] = (((((((coef[0][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)]) + (coef[1][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6) - 1][(-t5 + t7)][(-t5 + t8)])) + (coef[2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * 
A[t5 % 2][(-t5 + t6)][(-t5 + t7) - 1][(-t5 + t8)])) + (coef[3][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8) - 1])) + (coef[4][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6) + 1][(-t5 + t7)][(-t5 + t8)])) + (coef[5][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7) + 1][(-t5 + t8)])) + (coef[6][(-t5 + t6)][(-t5 + t7)][(-t5 + t8)] * A[t5 % 2][(-t5 + t6)][(-t5 + t7)][(-t5 + t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 7; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
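In the omp_formatted variant above, only the second tile loop (t2) carries the #pragma omp parallel for private(lbv,ubv,t3,...); the time dimension stays sequential and the inner tile indices are privatized. The sketch below shows the same pattern on a plain, untiled 1D 3-point stencil; it is an illustration only, not the PLUTO-generated schedule, and all names are invented.

#include <cstdio>
#include <utility>
#include <vector>

int main() {
  const int N = 1 << 20;  // grid points
  const int T = 50;       // time steps
  std::vector<double> a(N, 1.0), b(N, 0.0);
  double* cur = a.data();
  double* nxt = b.data();
  for (int t = 0; t < T; ++t) {        // time dimension stays sequential
#pragma omp parallel for schedule(static)
    for (int i = 1; i < N - 1; ++i) {  // space dimension runs in parallel
      nxt[i] = 0.25 * cur[i - 1] + 0.5 * cur[i] + 0.25 * cur[i + 1];
    }
    nxt[0] = cur[0];                   // carry boundary values forward
    nxt[N - 1] = cur[N - 1];
    std::swap(cur, nxt);               // double buffering, like A[t % 2]
  }
  std::printf("center value after %d steps: %f\n", T, cur[N / 2]);
  return 0;
}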
omp_workshare3.c
/* { dg-do compile } */ /****************************************************************************** * OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version * FILE: omp_workshare3.c * DESCRIPTION: * This example attempts to show use of the parallel for construct. However * it will generate errors at compile time. Try to determine what is causing * the error. See omp_workshare4.c for a corrected version. * SOURCE: Blaise Barney 5/99 * LAST REVISED: 03/03/2002 ******************************************************************************/ #include <omp.h> #include <stdio.h> #define N 50 #define CHUNKSIZE 5 int main () { int i, chunk, tid; float a[N], b[N], c[N]; /* Some initializations */ for (i=0; i < N; i++) a[i] = b[i] = i * 1.0; chunk = CHUNKSIZE; #pragma omp parallel for \ shared(a,b,c,chunk) \ private(i,tid) \ schedule(static,chunk) { /* { dg-error "expected" } */ tid = omp_get_thread_num(); for (i=0; i < N; i++) { c[i] = a[i] + b[i]; printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]); } } /* end of parallel for construct */ return 0; }
/* { dg-do compile } */ /****************************************************************************** * OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version * FILE: omp_workshare3.c * DESCRIPTION: * This example attempts to show use of the parallel for construct. However * it will generate errors at compile time. Try to determine what is causing * the error. See omp_workshare4.c for a corrected version. * SOURCE: Blaise Barney 5/99 * LAST REVISED: 03/03/2002 ******************************************************************************/ #include <omp.h> #include <stdio.h> #define N 50 #define CHUNKSIZE 5 int main() { int i, chunk, tid; float a[N], b[N], c[N]; /* Some initializations */ for (i = 0; i < N; i++) a[i] = b[i] = i * 1.0; chunk = CHUNKSIZE; { /* { dg-error "expected" } */ tid = omp_get_thread_num(); for (i = 0; i < N; i++) { c[i] = a[i] + b[i]; printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]); } } /* end of parallel for construct */ return 0; }
/* { dg-do compile } */ /****************************************************************************** * OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version * FILE: omp_workshare3.c * DESCRIPTION: * This example attempts to show use of the parallel for construct. However * it will generate errors at compile time. Try to determine what is causing * the error. See omp_workshare4.c for a corrected version. * SOURCE: Blaise Barney 5/99 * LAST REVISED: 03/03/2002 ******************************************************************************/ #include <omp.h> #include <stdio.h> #define N 50 #define CHUNKSIZE 5 int main() { int i, chunk, tid; float a[N], b[N], c[N]; /* Some initializations */ for (i = 0; i < N; i++) a[i] = b[i] = i * 1.0; chunk = CHUNKSIZE; #pragma omp parallel for \ shared(a,b,c,chunk) \ private(i,tid) \ schedule(static,chunk) { /* { dg-error "expected" } */ tid = omp_get_thread_num(); for (i = 0; i < N; i++) { c[i] = a[i] + b[i]; printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]); } } /* end of parallel for construct */ return 0; }
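The intentional compile error in omp_workshare3.c is that #pragma omp parallel for must be followed immediately by a for statement, not by a compound block. The referenced omp_workshare4.c is not reproduced in this collection; the sketch below shows one plausible correction, splitting the combined construct into a parallel region plus a work-sharing for loop so the call to omp_get_thread_num() has a legal home.

#include <omp.h>
#include <stdio.h>

#define N 50
#define CHUNKSIZE 5

int main() {
  int i, chunk, tid;
  float a[N], b[N], c[N];
  for (i = 0; i < N; i++)
    a[i] = b[i] = i * 1.0f;
  chunk = CHUNKSIZE;
#pragma omp parallel shared(a, b, c, chunk) private(i, tid)
  {
    tid = omp_get_thread_num();  // legal here: inside the parallel region
#pragma omp for schedule(static, chunk)
    for (i = 0; i < N; i++) {    // the work-sharing "for" binds to this loop
      c[i] = a[i] + b[i];
      printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
    }
  }
  return 0;
}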
Fig_6.12_piLoopCombined.c
#include <stdio.h> #include <omp.h> #define NTHREADS 4 static long num_steps = 100000000; double step; int main () { int i; double x, pi, sum = 0.0; double start_time, run_time; step = 1.0/(double) num_steps; omp_set_num_threads(NTHREADS); start_time = omp_get_wtime(); #pragma omp parallel for private(x) reduction(+:sum) for (i = 0; i < num_steps; i++) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); } pi = step * sum; run_time = omp_get_wtime() - start_time; printf("pi is %f in %f seconds %d threads\n", pi, run_time, NTHREADS); }
#include <stdio.h> #include <omp.h> #define NTHREADS 4 static long num_steps = 100000000; double step; int main() { int i; double x, pi, sum = 0.0; double start_time, run_time; step = 1.0 / (double)num_steps; omp_set_num_threads(NTHREADS); start_time = omp_get_wtime(); for (i = 0; i < num_steps; i++) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); } pi = step * sum; run_time = omp_get_wtime() - start_time; printf("pi is %f in %f seconds %d threads\n", pi, run_time, NTHREADS); }
#include <stdio.h> #include <omp.h> #define NTHREADS 4 static long num_steps = 100000000; double step; int main() { int i; double x, pi, sum = 0.0; double start_time, run_time; step = 1.0 / (double)num_steps; omp_set_num_threads(NTHREADS); start_time = omp_get_wtime(); #pragma omp parallel for private(x) reduction(+:sum) for (i = 0; i < num_steps; i++) { x = (i + 0.5) * step; sum += 4.0 / (1.0 + x * x); } pi = step * sum; run_time = omp_get_wtime() - start_time; printf("pi is %f in %f seconds %d threads\n", pi, run_time, NTHREADS); }
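The only difference between the two formatted variants above is the reduction(+:sum) clause; without it, the concurrent sum += updates would race. Conceptually the reduction amounts to per-thread partial sums combined once at the end, as in this illustrative sketch (not what any particular compiler literally generates):

#include <cstdio>

int main() {
  const long num_steps = 100000000;
  const double step = 1.0 / (double)num_steps;
  double sum = 0.0;
#pragma omp parallel
  {
    double local = 0.0;  // private partial sum per thread
#pragma omp for
    for (long i = 0; i < num_steps; i++) {
      double x = (i + 0.5) * step;
      local += 4.0 / (1.0 + x * x);
    }
#pragma omp atomic
    sum += local;        // one synchronized combine per thread
  }
  std::printf("pi is %f\n", step * sum);
  return 0;
}

Because the atomic update runs once per thread rather than once per iteration, this costs essentially nothing compared with the hundred million loop iterations, which is the same trade the reduction clause makes.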
Efficient_RANSAC.h
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // You can redistribute it and/or modify it under the terms of the GNU // General Public License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // Licensees holding a valid commercial license may use this file in // accordance with the commercial license agreement provided with the software. // // This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE // WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. // // $URL$ // $Id$ // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #include <CGAL/license/Point_set_shape_detection_3.h> #include <CGAL/Shape_detection_3/Octree.h> #include <CGAL/Shape_detection_3/Shape_base.h> #include <CGAL/Random.h> //for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> //boost -------------- #include <boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- /*! \file Efficient_RANSAC.h */ namespace CGAL { namespace Shape_detection_3 { /*! \ingroup PkgPointSetShapeDetection3 \brief A shape detection algorithm using a RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits a model of `EfficientRANSACTraits` */ template <class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int>& m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< property map to access the unoriented normal of an input point typedef Shape_base<Traits> Shape; ///< shape type. 
#ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; #else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif ///< An `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< An `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! %Parameters for the shape detection algorithm. They are explained in detail in Section \ref Point_set_shape_detection_3Parameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01) , min_points((std::numeric_limits<std::size_t>::max)()) , epsilon(-1) , normal_threshold((FT) 0.9) , cluster_epsilon(-1) {} FT probability; ///< Probability to control search endurance. %Default value: 5%. std::size_t min_points; ///< Minimum number of points of a shape. %Default value: 1% of total number of input points. FT epsilon; ///< Maximum tolerance Euclidian distance from a point and a shape. %Default value: 1% of bounding box diagonal. FT normal_threshold; ///< Maximum tolerance normal deviation from a point's normal to the normal on shape at projected point. %Default value: 0.9 (around 25 degrees). FT cluster_epsilon; ///< Maximum distance between points to be considered connected. %Default value: 1% of bounding box diagonal. }; /// @} private: typedef internal::Octree<internal::DirectPointAccessor<Traits> > Direct_octree; typedef internal::Octree<internal::IndexedPointAccessor<Traits> > Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template <class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection engine. */ Efficient_RANSAC(Traits t = Traits()) : m_traits(t) , m_direct_octrees(NULL) , m_global_octree(NULL) , m_num_subsets(0) , m_num_available_points(0) , m_num_total_points(0) , m_valid_iterators(false) {} /*! Releases all memory allocated by this instances including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits& traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map& point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. */ const Normal_map& normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. 
The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range& input_range, ///< range of input data. Point_map point_map = Point_map(), ///< property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< property map to access the normal of an input point. ) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers in the detection engine the shape type `ShapeType` that must inherit from `Shape_base`. For example, for registering a plane as detectable shape you should call `ransac.add_shape_factory< Shape_detection_3::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection_3::Plane<Traits> >();`. */ template <class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. */ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1;s >= 0;--s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0;i<subsetSize;i++) { std::size_t index = get_default_random()(2); index = index + (i<<1); index = (index >= remainingPoints) ? remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, m_normal_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, m_normal_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->createTree(); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap, m_normal_pmap); m_global_octree->createTree(); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! 
Frees memory allocated for the internal search structures but keeps the detected shapes. It invalidates the range retrieved using `unassigned_points()`. */ void clear_octrees() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; if (m_global_octree) { delete m_global_octree; m_global_octree = NULL; } if (m_direct_octrees) { for (std::size_t i = 0;i<m_num_subsets;i++) delete m_direct_octrees[i]; delete [] m_direct_octrees; m_direct_octrees = NULL; } m_num_subsets = 0; } /*! Calls `clear_octrees()` and removes all detected shapes. All internal structures are cleaned, including formerly detected shapes. Thus iterators and ranges retrieved through `shapes()` and `indices_of_unassigned_points()` are invalidated. */ void clear() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; std::vector<int>().swap(m_shape_index); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; clear_octrees(); clear_shape_factories(); } /// @} /// \name Detection /// @{ /*! Performs the shape detection. Shape types considered during the detection are those registered using `add_shape_factory()`. \return `true` if shape types have been registered and input data has been set. Otherwise, `false` is returned. */ bool detect( const Parameters &options = Parameters() ///< %Parameters for shape detection. ) { // No shape types for detection or no points provided, exit if (m_shape_factories.size() == 0 || (m_input_iterator_beyond - m_input_iterator_first) == 0) return false; if (m_num_subsets == 0 || m_global_octree == 0) { if (!preprocess()) return false; } // Reset data structures possibly used by former search m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; for (std::size_t i = 0;i<m_num_subsets;i++) { m_available_octree_sizes[i] = m_direct_octrees[i]->size(); } // Use bounding box diagonal as reference for default values Bbox_3 bbox = m_global_octree->boundingBox(); FT bbox_diagonal = (FT) CGAL::sqrt( (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin()) + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin()) + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin())); m_options = options; // Epsilon or cluster_epsilon have been set by the user? // If not, derive from bounding box diagonal m_options.epsilon = (m_options.epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.epsilon; m_options.cluster_epsilon = (m_options.cluster_epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon; // Minimum number of points has been set? m_options.min_points = (m_options.min_points >= m_num_available_points) ? (std::size_t)((FT)0.01 * m_num_available_points) : m_options.min_points; m_options.min_points = (m_options.min_points < 10) ? 
10 : m_options.min_points; // Initializing the shape index m_shape_index.assign(m_num_available_points, -1); // List of all randomly drawn candidates // with the minimum number of points std::vector<Shape *> candidates; // Identifying minimum number of samples std::size_t required_samples = 0; for (std::size_t i = 0;i<m_shape_factories.size();i++) { Shape *tmp = (Shape *) m_shape_factories[i](); required_samples = (std::max<std::size_t>)(required_samples, tmp->minimum_sample_size()); delete tmp; } std::size_t first_sample; // first sample for RANSAC FT best_expected = 0; // number of points that have been assigned to a shape std::size_t num_invalid = 0; std::size_t generated_candidates = 0; std::size_t failed_candidates = 0; bool force_exit = false; bool keep_searching = true; do { // main loop best_expected = 0; if (keep_searching) do { // Generate candidates //1. pick a point p1 randomly among available points std::set<std::size_t> indices; bool done = false; do { do first_sample = get_default_random()(m_num_available_points); while (m_shape_index[first_sample] != -1); done = m_global_octree->drawSamplesFromCellContainingPoint( get(m_point_pmap, *(m_input_iterator_first + first_sample)), select_random_octree_level(), indices, m_shape_index, required_samples); } while (m_shape_index[first_sample] != -1 || !done); generated_candidates++; //add candidate for each type of primitives for(typename std::vector<Shape *(*)()>::iterator it = m_shape_factories.begin(); it != m_shape_factories.end(); it++) { Shape *p = (Shape *) (*it)(); //compute the primitive and says if the candidate is valid p->compute(indices, m_input_iterator_first, m_traits, m_point_pmap, m_normal_pmap, m_options.epsilon, m_options.normal_threshold); if (p->is_valid()) { improve_bound(p, m_num_available_points - num_invalid, 1, 500); //evaluate the candidate if(p->max_bound() >= m_options.min_points && p->score() > 0) { if (best_expected < p->expected_value()) best_expected = p->expected_value(); candidates.push_back(p); } else { failed_candidates++; delete p; } } else { failed_candidates++; delete p; } } if (failed_candidates >= 10000) force_exit = true; keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while( !force_exit && stop_probability((std::size_t) best_expected, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability && keep_searching); // end of generate candidate if (force_exit) { break; } if (candidates.empty()) continue; // Now get the best candidate in the current set of all candidates // Note that the function sorts the candidates: // the best candidate is always the last element of the vector Shape *best_candidate = get_best_candidate(candidates, m_num_available_points - num_invalid); // If search is done and the best candidate is too small, we are done. 
if (!keep_searching && best_candidate->m_score < m_options.min_points) break; if (!best_candidate) continue; best_candidate->m_indices.clear(); best_candidate->m_score = m_global_octree->score(best_candidate, m_shape_index, FT(3) * m_options.epsilon, m_options.normal_threshold); best_expected = static_cast<FT>(best_candidate->m_score); best_candidate->connected_component(best_candidate->m_indices, m_options.cluster_epsilon); // check score against min_points and clear out candidates if too low if (best_candidate->indices_of_assigned_points().size() < m_options.min_points) { if (!(best_candidate->indices_of_assigned_points().empty())) for (std::size_t i = 0;i < candidates.size() - 1;i++) { if (best_candidate->is_same(candidates[i])) { delete candidates[i]; candidates[i] = NULL; } } candidates.back() = NULL; delete best_candidate; best_candidate = NULL; // Trimming candidates list std::size_t empty = 0, occupied = 0; while (empty < candidates.size()) { while (empty < candidates.size() && candidates[empty]) empty++; if (empty >= candidates.size()) break; if (occupied < empty) occupied = empty + 1; while (occupied < candidates.size() && !candidates[occupied]) occupied++; if (occupied >= candidates.size()) break; candidates[empty] = candidates[occupied]; candidates[occupied] = NULL; empty++; occupied++; } candidates.resize(empty); } else if (stop_probability((std::size_t) best_candidate->expected_value(), (m_num_available_points - num_invalid), generated_candidates, m_global_octree->maxLevel()) <= m_options.probability) { // Remove candidate from list candidates.back() = NULL; //1. add best candidate to final result. m_extracted_shapes->push_back( boost::shared_ptr<Shape>(best_candidate)); //2. remove the points const std::vector<std::size_t> &indices_points_best_candidate = best_candidate->indices_of_assigned_points(); for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) { m_shape_index[indices_points_best_candidate.at(i)] = int(m_extracted_shapes->size()) - 1; num_invalid++; for (std::size_t j = 0;j<m_num_subsets;j++) { if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) { std::size_t offset = m_direct_octrees[j]->offset(); if (offset <= indices_points_best_candidate.at(i) && (indices_points_best_candidate.at(i) - offset) < m_direct_octrees[j]->size()) { m_available_octree_sizes[j]--; } } } } //2.3 Remove the points from the subtrees generated_candidates--; failed_candidates = 0; best_expected = 0; std::vector<std::size_t> subset_sizes(m_num_subsets); subset_sizes[0] = m_available_octree_sizes[0]; for (std::size_t i = 1;i<m_num_subsets;i++) { subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i]; } //3. Remove points from candidates common with extracted primitive //#pragma omp parallel for best_expected = 0; for (std::size_t i=0;i< candidates.size()-1;i++) { if (candidates[i]) { candidates[i]->update_points(m_shape_index); candidates[i]->compute_bound( subset_sizes[candidates[i]->m_nb_subset_used - 1], m_num_available_points - num_invalid); if (candidates[i]->max_bound() < m_options.min_points) { delete candidates[i]; candidates[i] = NULL; } else { best_expected = (candidates[i]->expected_value() > best_expected) ? 
candidates[i]->expected_value() : best_expected; } } } std::size_t start = 0, end = candidates.size() - 1; while (start < end) { while (candidates[start] && start < end) start++; while (!candidates[end] && start < end) end--; if (!candidates[start] && candidates[end] && start < end) { candidates[start] = candidates[end]; candidates[end] = NULL; start++; end--; } } if (candidates[end]) end++; candidates.resize(end); } keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while((keep_searching && FT(m_num_available_points - num_invalid) >= m_options.min_points) || best_expected >= m_options.min_points); // Clean up remaining candidates. for (std::size_t i = 0;i<candidates.size();i++) delete candidates[i]; candidates.resize(0); m_num_available_points -= num_invalid; return true; } /// @} /// \name Access /// @{ /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Shape>` over the detected shapes in the order of detection. Depending on the chosen probability for the detection, the shapes are ordered with decreasing size. */ Shape_range shapes() const { return Shape_range(m_extracted_shapes); } /*! Number of points not assigned to a shape. */ std::size_t number_of_unassigned_points() { return m_num_available_points; } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t` as indices into the input data that has not been assigned to a shape. */ Point_index_range indices_of_unassigned_points() { Filter_unassigned_points fup(m_shape_index); Point_index_iterator p1 = boost::make_filter_iterator<Filter_unassigned_points>( fup, boost::counting_iterator<std::size_t>(0), boost::counting_iterator<std::size_t>(m_shape_index.size())); return make_range(p1, Point_index_iterator(p1.end())); } /// @} private: int select_random_octree_level() { return (int) get_default_random()(m_global_octree->maxLevel() + 1); } Shape* get_best_candidate(std::vector<Shape* >& candidates, const std::size_t num_available_points) { if (candidates.size() == 1) return candidates.back(); int index_worse_candidate = 0; bool improved = true; while (index_worse_candidate < (int)candidates.size() - 1 && improved) { improved = false; typename Shape::Compare_by_max_bound comp; std::sort(candidates.begin() + index_worse_candidate, candidates.end(), comp); //refine the best one improve_bound(candidates.back(), num_available_points, m_num_subsets, m_options.min_points); int position_stop; //Take all those intersecting the best one, check for equal ones for (position_stop = int(candidates.size()) - 1; position_stop > index_worse_candidate; position_stop--) { if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break;//the intervals do not overlaps anymore if (candidates.at(position_stop)->max_bound() <= m_options.min_points) break; //the following candidate doesnt have enough points! 
//if we reach this point, there is an overlap // between the best one and position_stop, //so request refining the bound on position_stop improved |= improve_bound(candidates.at(position_stop), num_available_points, m_num_subsets, m_options.min_points); //test again after refining if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break; //the intervals do not overlap anymore } index_worse_candidate = position_stop; } return candidates.back(); } bool improve_bound(Shape *candidate, std::size_t num_available_points, std::size_t max_subset, std::size_t min_points) { if (candidate->m_nb_subset_used >= max_subset) return false; if (candidate->m_nb_subset_used >= m_num_subsets) return false; candidate->m_nb_subset_used = (candidate->m_nb_subset_used >= m_num_subsets) ? m_num_subsets - 1 : candidate->m_nb_subset_used; //what this does is add another subset and recompute the lower and upper bounds //the next subset to include is provided by m_nb_subset_used std::size_t num_points_evaluated = 0; for (std::size_t i=0;i<candidate->m_nb_subset_used;i++) num_points_evaluated += m_available_octree_sizes[i]; // need the score of the new subset as well as the sum of // the scores of the previously considered subsets std::size_t new_score = 0; std::size_t new_sampled_points = 0; do { new_score = m_direct_octrees[candidate->m_nb_subset_used]->score( candidate, m_shape_index, m_options.epsilon, m_options.normal_threshold); candidate->m_score += new_score; num_points_evaluated += m_available_octree_sizes[candidate->m_nb_subset_used]; new_sampled_points += m_available_octree_sizes[candidate->m_nb_subset_used]; candidate->m_nb_subset_used++; } while (new_sampled_points < min_points && candidate->m_nb_subset_used < m_num_subsets); candidate->m_score = candidate->m_indices.size(); candidate->compute_bound(num_points_evaluated, num_available_points); return true; } inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const { return (std::min<FT>)(std::pow((FT) 1.f - (FT) largest_candidate / FT(num_pts * octree_depth * 4), (int) num_candidates), (FT) 1); } private: Parameters m_options; // Traits class. Traits m_traits; // Octrees built on the input data for quick shape evaluation and // sample selection within an octree cell. Direct_octree **m_direct_octrees; Indexed_octree *m_global_octree; std::vector<std::size_t> m_available_octree_sizes; std::size_t m_num_subsets; // maps index into points to assigned extracted primitive std::vector<int> m_shape_index; std::size_t m_num_available_points; std::size_t m_num_total_points; //gives the index of the subset of point i std::vector<int> m_index_subsets; boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; std::vector<Shape *(*)()> m_shape_factories; // iterators of input data bool m_valid_iterators; Input_iterator m_input_iterator_first, m_input_iterator_beyond; Point_map m_point_pmap; Normal_map m_normal_pmap; }; } } #endif // CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H
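Usage note (not part of the header above): the engine is driven through set_input(), add_shape_factory(), detect() and shapes(), as its documentation comments describe. Below is a minimal sketch; the kernel, the point-with-normal container, the traits typedef and the parameter values are assumptions chosen for illustration, not taken from this file.

#include <CGAL/Exact_predicates_inexact_constructions_kernel.h>
#include <CGAL/Shape_detection_3.h>
#include <CGAL/property_map.h>
#include <cstdio>
#include <utility>
#include <vector>

typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
typedef std::pair<Kernel::Point_3, Kernel::Vector_3> Point_with_normal;
typedef std::vector<Point_with_normal> Pwn_vector;
typedef CGAL::Shape_detection_3::Efficient_RANSAC_traits<
  Kernel, Pwn_vector,
  CGAL::First_of_pair_property_map<Point_with_normal>,
  CGAL::Second_of_pair_property_map<Point_with_normal> > Traits;
typedef CGAL::Shape_detection_3::Efficient_RANSAC<Traits> Efficient_ransac;

int main()
{
  Pwn_vector points; // fill with input points and unoriented normals
  Efficient_ransac ransac;
  ransac.set_input(points); // the range is reordered by detect()/preprocess()
  ransac.add_shape_factory<CGAL::Shape_detection_3::Plane<Traits> >();

  Efficient_ransac::Parameters parameters;
  parameters.probability = 0.05; // search endurance (illustrative value)
  parameters.min_points = 200;   // minimum shape size (illustrative value)

  if (ransac.detect(parameters)) {
    Efficient_ransac::Shape_range shapes = ransac.shapes();
    for (Efficient_ransac::Shape_range::iterator it = shapes.begin();
         it != shapes.end(); ++it) {
      // *it is a boost::shared_ptr<Efficient_ransac::Shape>
      std::printf("shape with %lu assigned points\n",
                  (unsigned long) (*it)->indices_of_assigned_points().size());
    }
  }
  return 0;
}

Unset parameters fall back to the defaults documented in Parameters, derived from the bounding-box diagonal and the input size.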
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // You can redistribute it and/or modify it under the terms of the GNU // General Public License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // Licensees holding a valid commercial license may use this file in // accordance with the commercial license agreement provided with the software. // // This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE // WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. // // $URL$ // $Id$ // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #include <CGAL/license/Point_set_shape_detection_3.h> #include <CGAL/Shape_detection_3/Octree.h> #include <CGAL/Shape_detection_3/Shape_base.h> #include <CGAL/Random.h> //for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> //boost -------------- #include <boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- /*! \file Efficient_RANSAC.h */ namespace CGAL { namespace Shape_detection_3 { /*! \ingroup PkgPointSetShapeDetection3 \brief A shape detection algorithm using a RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits a model of `EfficientRANSACTraits` */ template <class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int>& m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< property map to access the unoriented normal of an input point typedef Shape_base<Traits> Shape; ///< shape type. 
#ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; #else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif ///< An `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< An `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! %Parameters for the shape detection algorithm. They are explained in detail in Section \ref Point_set_shape_detection_3Parameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01) , min_points((std::numeric_limits<std::size_t>::max)()) , epsilon(-1) , normal_threshold((FT) 0.9) , cluster_epsilon(-1) {} FT probability; ///< Probability to control search endurance. %Default value: 5%. std::size_t min_points; ///< Minimum number of points of a shape. %Default value: 1% of total number of input points. FT epsilon; ///< Maximum tolerance Euclidian distance from a point and a shape. %Default value: 1% of bounding box diagonal. FT normal_threshold; ///< Maximum tolerance normal deviation from a point's normal to the normal on shape at projected point. %Default value: 0.9 (around 25 degrees). FT cluster_epsilon; ///< Maximum distance between points to be considered connected. %Default value: 1% of bounding box diagonal. }; /// @} private: typedef internal::Octree<internal::DirectPointAccessor<Traits> > Direct_octree; typedef internal::Octree<internal::IndexedPointAccessor<Traits> > Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template <class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection engine. */ Efficient_RANSAC(Traits t = Traits()) : m_traits(t) , m_direct_octrees(NULL) , m_global_octree(NULL) , m_num_subsets(0) , m_num_available_points(0) , m_num_total_points(0) , m_valid_iterators(false) {} /*! Releases all memory allocated by this instances including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits& traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map& point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. */ const Normal_map& normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. 
The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range& input_range, ///< range of input data. Point_map point_map = Point_map(), ///< property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< property map to access the normal of an input point. ) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers in the detection engine the shape type `ShapeType` that must inherit from `Shape_base`. For example, for registering a plane as detectable shape you should call `ransac.add_shape_factory< Shape_detection_3::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection_3::Plane<Traits> >();`. */ template <class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. */ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1;s >= 0;--s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0;i<subsetSize;i++) { std::size_t index = get_default_random()(2); index = index + (i<<1); index = (index >= remainingPoints) ? remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, m_normal_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, m_normal_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->createTree(); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap, m_normal_pmap); m_global_octree->createTree(); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! 
Frees memory allocated for the internal search structures but keeps the detected shapes. It invalidates the range retrieved using `unassigned_points()`. */ void clear_octrees() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; if (m_global_octree) { delete m_global_octree; m_global_octree = NULL; } if (m_direct_octrees) { for (std::size_t i = 0;i<m_num_subsets;i++) delete m_direct_octrees[i]; delete [] m_direct_octrees; m_direct_octrees = NULL; } m_num_subsets = 0; } /*! Calls `clear_octrees()` and removes all detected shapes. All internal structures are cleaned, including formerly detected shapes. Thus iterators and ranges retrieved through `shapes()` and `indices_of_unassigned_points()` are invalidated. */ void clear() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; std::vector<int>().swap(m_shape_index); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; clear_octrees(); clear_shape_factories(); } /// @} /// \name Detection /// @{ /*! Performs the shape detection. Shape types considered during the detection are those registered using `add_shape_factory()`. \return `true` if shape types have been registered and input data has been set. Otherwise, `false` is returned. */ bool detect( const Parameters &options = Parameters() ///< %Parameters for shape detection. ) { // No shape types for detection or no points provided, exit if (m_shape_factories.size() == 0 || (m_input_iterator_beyond - m_input_iterator_first) == 0) return false; if (m_num_subsets == 0 || m_global_octree == 0) { if (!preprocess()) return false; } // Reset data structures possibly used by former search m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; for (std::size_t i = 0;i<m_num_subsets;i++) { m_available_octree_sizes[i] = m_direct_octrees[i]->size(); } // Use bounding box diagonal as reference for default values Bbox_3 bbox = m_global_octree->boundingBox(); FT bbox_diagonal = (FT) CGAL::sqrt( (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin()) + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin()) + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin())); m_options = options; // Epsilon or cluster_epsilon have been set by the user? // If not, derive from bounding box diagonal m_options.epsilon = (m_options.epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.epsilon; m_options.cluster_epsilon = (m_options.cluster_epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon; // Minimum number of points has been set? m_options.min_points = (m_options.min_points >= m_num_available_points) ? (std::size_t)((FT)0.01 * m_num_available_points) : m_options.min_points; m_options.min_points = (m_options.min_points < 10) ? 
10 : m_options.min_points; // Initializing the shape index m_shape_index.assign(m_num_available_points, -1); // List of all randomly drawn candidates // with the minimum number of points std::vector<Shape *> candidates; // Identifying minimum number of samples std::size_t required_samples = 0; for (std::size_t i = 0;i<m_shape_factories.size();i++) { Shape *tmp = (Shape *) m_shape_factories[i](); required_samples = (std::max<std::size_t>)(required_samples, tmp->minimum_sample_size()); delete tmp; } std::size_t first_sample; // first sample for RANSAC FT best_expected = 0; // number of points that have been assigned to a shape std::size_t num_invalid = 0; std::size_t generated_candidates = 0; std::size_t failed_candidates = 0; bool force_exit = false; bool keep_searching = true; do { // main loop best_expected = 0; if (keep_searching) do { // Generate candidates //1. pick a point p1 randomly among available points std::set<std::size_t> indices; bool done = false; do { do first_sample = get_default_random()(m_num_available_points); while (m_shape_index[first_sample] != -1); done = m_global_octree->drawSamplesFromCellContainingPoint( get(m_point_pmap, *(m_input_iterator_first + first_sample)), select_random_octree_level(), indices, m_shape_index, required_samples); } while (m_shape_index[first_sample] != -1 || !done); generated_candidates++; //add candidate for each type of primitives for(typename std::vector<Shape *(*)()>::iterator it = m_shape_factories.begin(); it != m_shape_factories.end(); it++) { Shape *p = (Shape *) (*it)(); //compute the primitive and says if the candidate is valid p->compute(indices, m_input_iterator_first, m_traits, m_point_pmap, m_normal_pmap, m_options.epsilon, m_options.normal_threshold); if (p->is_valid()) { improve_bound(p, m_num_available_points - num_invalid, 1, 500); //evaluate the candidate if(p->max_bound() >= m_options.min_points && p->score() > 0) { if (best_expected < p->expected_value()) best_expected = p->expected_value(); candidates.push_back(p); } else { failed_candidates++; delete p; } } else { failed_candidates++; delete p; } } if (failed_candidates >= 10000) force_exit = true; keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while( !force_exit && stop_probability((std::size_t) best_expected, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability && keep_searching); // end of generate candidate if (force_exit) { break; } if (candidates.empty()) continue; // Now get the best candidate in the current set of all candidates // Note that the function sorts the candidates: // the best candidate is always the last element of the vector Shape *best_candidate = get_best_candidate(candidates, m_num_available_points - num_invalid); // If search is done and the best candidate is too small, we are done. 
if (!keep_searching && best_candidate->m_score < m_options.min_points) break; if (!best_candidate) continue; best_candidate->m_indices.clear(); best_candidate->m_score = m_global_octree->score(best_candidate, m_shape_index, FT(3) * m_options.epsilon, m_options.normal_threshold); best_expected = static_cast<FT>(best_candidate->m_score); best_candidate->connected_component(best_candidate->m_indices, m_options.cluster_epsilon); // check score against min_points and clear out candidates if too low if (best_candidate->indices_of_assigned_points().size() < m_options.min_points) { if (!(best_candidate->indices_of_assigned_points().empty())) for (std::size_t i = 0;i < candidates.size() - 1;i++) { if (best_candidate->is_same(candidates[i])) { delete candidates[i]; candidates[i] = NULL; } } candidates.back() = NULL; delete best_candidate; best_candidate = NULL; // Trimming candidates list std::size_t empty = 0, occupied = 0; while (empty < candidates.size()) { while (empty < candidates.size() && candidates[empty]) empty++; if (empty >= candidates.size()) break; if (occupied < empty) occupied = empty + 1; while (occupied < candidates.size() && !candidates[occupied]) occupied++; if (occupied >= candidates.size()) break; candidates[empty] = candidates[occupied]; candidates[occupied] = NULL; empty++; occupied++; } candidates.resize(empty); } else if (stop_probability((std::size_t) best_candidate->expected_value(), (m_num_available_points - num_invalid), generated_candidates, m_global_octree->maxLevel()) <= m_options.probability) { // Remove candidate from list candidates.back() = NULL; //1. add best candidate to final result. m_extracted_shapes->push_back( boost::shared_ptr<Shape>(best_candidate)); //2. remove the points const std::vector<std::size_t> &indices_points_best_candidate = best_candidate->indices_of_assigned_points(); for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) { m_shape_index[indices_points_best_candidate.at(i)] = int(m_extracted_shapes->size()) - 1; num_invalid++; for (std::size_t j = 0;j<m_num_subsets;j++) { if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) { std::size_t offset = m_direct_octrees[j]->offset(); if (offset <= indices_points_best_candidate.at(i) && (indices_points_best_candidate.at(i) - offset) < m_direct_octrees[j]->size()) { m_available_octree_sizes[j]--; } } } } //2.3 Remove the points from the subtrees generated_candidates--; failed_candidates = 0; best_expected = 0; std::vector<std::size_t> subset_sizes(m_num_subsets); subset_sizes[0] = m_available_octree_sizes[0]; for (std::size_t i = 1;i<m_num_subsets;i++) { subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i]; } //3. Remove points from candidates common with extracted primitive // best_expected = 0; for (std::size_t i=0;i< candidates.size()-1;i++) { if (candidates[i]) { candidates[i]->update_points(m_shape_index); candidates[i]->compute_bound( subset_sizes[candidates[i]->m_nb_subset_used - 1], m_num_available_points - num_invalid); if (candidates[i]->max_bound() < m_options.min_points) { delete candidates[i]; candidates[i] = NULL; } else { best_expected = (candidates[i]->expected_value() > best_expected) ? 
candidates[i]->expected_value() : best_expected; } } } std::size_t start = 0, end = candidates.size() - 1; while (start < end) { while (candidates[start] && start < end) start++; while (!candidates[end] && start < end) end--; if (!candidates[start] && candidates[end] && start < end) { candidates[start] = candidates[end]; candidates[end] = NULL; start++; end--; } } if (candidates[end]) end++; candidates.resize(end); } keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while((keep_searching && FT(m_num_available_points - num_invalid) >= m_options.min_points) || best_expected >= m_options.min_points); // Clean up remaining candidates. for (std::size_t i = 0;i<candidates.size();i++) delete candidates[i]; candidates.resize(0); m_num_available_points -= num_invalid; return true; } /// @} /// \name Access /// @{ /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Shape>` over the detected shapes in the order of detection. Depending on the chosen probability for the detection, the shapes are ordered with decreasing size. */ Shape_range shapes() const { return Shape_range(m_extracted_shapes); } /*! Number of points not assigned to a shape. */ std::size_t number_of_unassigned_points() { return m_num_available_points; } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t` as indices into the input data that has not been assigned to a shape. */ Point_index_range indices_of_unassigned_points() { Filter_unassigned_points fup(m_shape_index); Point_index_iterator p1 = boost::make_filter_iterator<Filter_unassigned_points>( fup, boost::counting_iterator<std::size_t>(0), boost::counting_iterator<std::size_t>(m_shape_index.size())); return make_range(p1, Point_index_iterator(p1.end())); } /// @} private: int select_random_octree_level() { return (int) get_default_random()(m_global_octree->maxLevel() + 1); } Shape* get_best_candidate(std::vector<Shape* >& candidates, const std::size_t num_available_points) { if (candidates.size() == 1) return candidates.back(); int index_worse_candidate = 0; bool improved = true; while (index_worse_candidate < (int)candidates.size() - 1 && improved) { improved = false; typename Shape::Compare_by_max_bound comp; std::sort(candidates.begin() + index_worse_candidate, candidates.end(), comp); //refine the best one improve_bound(candidates.back(), num_available_points, m_num_subsets, m_options.min_points); int position_stop; //Take all those intersecting the best one, check for equal ones for (position_stop = int(candidates.size()) - 1; position_stop > index_worse_candidate; position_stop--) { if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break;//the intervals do not overlaps anymore if (candidates.at(position_stop)->max_bound() <= m_options.min_points) break; //the following candidate doesnt have enough points! 
//if we reach this point, there is an overlap // between best one and position_stop //so request refining bound on position_stop improved |= improve_bound(candidates.at(position_stop), num_available_points, m_num_subsets, m_options.min_points); //test again after refined if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break;//the intervals do not overlaps anymore } index_worse_candidate = position_stop; } return candidates.back(); } bool improve_bound(Shape *candidate, std::size_t num_available_points, std::size_t max_subset, std::size_t min_points) { if (candidate->m_nb_subset_used >= max_subset) return false; if (candidate->m_nb_subset_used >= m_num_subsets) return false; candidate->m_nb_subset_used = (candidate->m_nb_subset_used >= m_num_subsets) ? m_num_subsets - 1 : candidate->m_nb_subset_used; //what it does is add another subset and recompute lower and upper bound //the next subset to include is provided by m_nb_subset_used std::size_t num_points_evaluated = 0; for (std::size_t i=0;i<candidate->m_nb_subset_used;i++) num_points_evaluated += m_available_octree_sizes[i]; // need score of new subset as well as sum of // the score of the previous considered subset std::size_t new_score = 0; std::size_t new_sampled_points = 0; do { new_score = m_direct_octrees[candidate->m_nb_subset_used]->score( candidate, m_shape_index, m_options.epsilon, m_options.normal_threshold); candidate->m_score += new_score; num_points_evaluated += m_available_octree_sizes[candidate->m_nb_subset_used]; new_sampled_points += m_available_octree_sizes[candidate->m_nb_subset_used]; candidate->m_nb_subset_used++; } while (new_sampled_points < min_points && candidate->m_nb_subset_used < m_num_subsets); candidate->m_score = candidate->m_indices.size(); candidate->compute_bound(num_points_evaluated, num_available_points); return true; } inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const { return (std::min<FT>)(std::pow((FT) 1.f - (FT) largest_candidate / FT(num_pts * octree_depth * 4), (int) num_candidates), (FT) 1); } private: Parameters m_options; // Traits class. Traits m_traits; // Octrees build on input data for quick shape evaluation and // sample selection within an octree cell. Direct_octree **m_direct_octrees; Indexed_octree *m_global_octree; std::vector<std::size_t> m_available_octree_sizes; std::size_t m_num_subsets; // maps index into points to assigned extracted primitive std::vector<int> m_shape_index; std::size_t m_num_available_points; std::size_t m_num_total_points; //give the index of the subset of point i std::vector<int> m_index_subsets; boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; std::vector<Shape *(*)()> m_shape_factories; // iterators of input data bool m_valid_iterators; Input_iterator m_input_iterator_first, m_input_iterator_beyond; Point_map m_point_pmap; Normal_map m_normal_pmap; }; } } #endif // CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H
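A note on the stopping criterion shared by all variants of this file: stop_probability() computes the probability that a candidate shape of a given size was missed by every sample drawn so far (cf. the schnabel2007efficient reference cited in the class documentation). Reading the code directly, with |C| = largest_candidate, N = num_pts, s = num_candidates and d = octree_depth, it evaluates

\[
  P_{\text{miss}} \;=\; \min\!\left(\left(1 - \frac{|C|}{4\,N\,d}\right)^{s},\; 1\right),
\]

and detect() keeps searching while this value exceeds Parameters::probability.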
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // You can redistribute it and/or modify it under the terms of the GNU // General Public License as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // Licensees holding a valid commercial license may use this file in // accordance with the commercial license agreement provided with the software. // // This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE // WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. // // $URL$ // $Id$ // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H #include <CGAL/license/Point_set_shape_detection_3.h> #include <CGAL/Shape_detection_3/Octree.h> #include <CGAL/Shape_detection_3/Shape_base.h> #include <CGAL/Random.h> //for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> //boost -------------- #include <boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- /*! \file Efficient_RANSAC.h */ namespace CGAL { namespace Shape_detection_3 { /*! \ingroup PkgPointSetShapeDetection3 \brief A shape detection algorithm using a RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits a model of `EfficientRANSACTraits` */ template <class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int>& m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< property map to access the unoriented normal of an input point typedef Shape_base<Traits> Shape; ///< shape type. 
#ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; #else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif ///< An `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< An `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! %Parameters for the shape detection algorithm. They are explained in detail in Section \ref Point_set_shape_detection_3Parameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01) , min_points((std::numeric_limits<std::size_t>::max)()) , epsilon(-1) , normal_threshold((FT) 0.9) , cluster_epsilon(-1) {} FT probability; ///< Probability to control search endurance. %Default value: 5%. std::size_t min_points; ///< Minimum number of points of a shape. %Default value: 1% of total number of input points. FT epsilon; ///< Maximum tolerance Euclidian distance from a point and a shape. %Default value: 1% of bounding box diagonal. FT normal_threshold; ///< Maximum tolerance normal deviation from a point's normal to the normal on shape at projected point. %Default value: 0.9 (around 25 degrees). FT cluster_epsilon; ///< Maximum distance between points to be considered connected. %Default value: 1% of bounding box diagonal. }; /// @} private: typedef internal::Octree<internal::DirectPointAccessor<Traits> > Direct_octree; typedef internal::Octree<internal::IndexedPointAccessor<Traits> > Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template <class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection engine. */ Efficient_RANSAC(Traits t = Traits()) : m_traits(t) , m_direct_octrees(NULL) , m_global_octree(NULL) , m_num_subsets(0) , m_num_available_points(0) , m_num_total_points(0) , m_valid_iterators(false) {} /*! Releases all memory allocated by this instances including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits& traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map& point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. */ const Normal_map& normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. 
The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range& input_range, ///< range of input data. Point_map point_map = Point_map(), ///< property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< property map to access the normal of an input point. ) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers in the detection engine the shape type `ShapeType` that must inherit from `Shape_base`. For example, for registering a plane as detectable shape you should call `ransac.add_shape_factory< Shape_detection_3::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection_3::Plane<Traits> >();`. */ template <class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. */ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1;s >= 0;--s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0;i<subsetSize;i++) { std::size_t index = get_default_random()(2); index = index + (i<<1); index = (index >= remainingPoints) ? remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, m_normal_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, m_normal_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->createTree(); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap, m_normal_pmap); m_global_octree->createTree(); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! 
Frees memory allocated for the internal search structures but keeps the detected shapes. It invalidates the range retrieved using `unassigned_points()`. */ void clear_octrees() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; if (m_global_octree) { delete m_global_octree; m_global_octree = NULL; } if (m_direct_octrees) { for (std::size_t i = 0;i<m_num_subsets;i++) delete m_direct_octrees[i]; delete [] m_direct_octrees; m_direct_octrees = NULL; } m_num_subsets = 0; } /*! Calls `clear_octrees()` and removes all detected shapes. All internal structures are cleaned, including formerly detected shapes. Thus iterators and ranges retrieved through `shapes()` and `indices_of_unassigned_points()` are invalidated. */ void clear() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; std::vector<int>().swap(m_shape_index); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; clear_octrees(); clear_shape_factories(); } /// @} /// \name Detection /// @{ /*! Performs the shape detection. Shape types considered during the detection are those registered using `add_shape_factory()`. \return `true` if shape types have been registered and input data has been set. Otherwise, `false` is returned. */ bool detect( const Parameters &options = Parameters() ///< %Parameters for shape detection. ) { // No shape types for detection or no points provided, exit if (m_shape_factories.size() == 0 || (m_input_iterator_beyond - m_input_iterator_first) == 0) return false; if (m_num_subsets == 0 || m_global_octree == 0) { if (!preprocess()) return false; } // Reset data structures possibly used by former search m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; for (std::size_t i = 0;i<m_num_subsets;i++) { m_available_octree_sizes[i] = m_direct_octrees[i]->size(); } // Use bounding box diagonal as reference for default values Bbox_3 bbox = m_global_octree->boundingBox(); FT bbox_diagonal = (FT) CGAL::sqrt( (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin()) + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin()) + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin())); m_options = options; // Epsilon or cluster_epsilon have been set by the user? // If not, derive from bounding box diagonal m_options.epsilon = (m_options.epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.epsilon; m_options.cluster_epsilon = (m_options.cluster_epsilon < 0) ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon; // Minimum number of points has been set? m_options.min_points = (m_options.min_points >= m_num_available_points) ? (std::size_t)((FT)0.01 * m_num_available_points) : m_options.min_points; m_options.min_points = (m_options.min_points < 10) ? 
10 : m_options.min_points; // Initializing the shape index m_shape_index.assign(m_num_available_points, -1); // List of all randomly drawn candidates // with the minimum number of points std::vector<Shape *> candidates; // Identifying minimum number of samples std::size_t required_samples = 0; for (std::size_t i = 0;i<m_shape_factories.size();i++) { Shape *tmp = (Shape *) m_shape_factories[i](); required_samples = (std::max<std::size_t>)(required_samples, tmp->minimum_sample_size()); delete tmp; } std::size_t first_sample; // first sample for RANSAC FT best_expected = 0; // number of points that have been assigned to a shape std::size_t num_invalid = 0; std::size_t generated_candidates = 0; std::size_t failed_candidates = 0; bool force_exit = false; bool keep_searching = true; do { // main loop best_expected = 0; if (keep_searching) do { // Generate candidates //1. pick a point p1 randomly among available points std::set<std::size_t> indices; bool done = false; do { do first_sample = get_default_random()(m_num_available_points); while (m_shape_index[first_sample] != -1); done = m_global_octree->drawSamplesFromCellContainingPoint( get(m_point_pmap, *(m_input_iterator_first + first_sample)), select_random_octree_level(), indices, m_shape_index, required_samples); } while (m_shape_index[first_sample] != -1 || !done); generated_candidates++; //add candidate for each type of primitives for(typename std::vector<Shape *(*)()>::iterator it = m_shape_factories.begin(); it != m_shape_factories.end(); it++) { Shape *p = (Shape *) (*it)(); //compute the primitive and says if the candidate is valid p->compute(indices, m_input_iterator_first, m_traits, m_point_pmap, m_normal_pmap, m_options.epsilon, m_options.normal_threshold); if (p->is_valid()) { improve_bound(p, m_num_available_points - num_invalid, 1, 500); //evaluate the candidate if(p->max_bound() >= m_options.min_points && p->score() > 0) { if (best_expected < p->expected_value()) best_expected = p->expected_value(); candidates.push_back(p); } else { failed_candidates++; delete p; } } else { failed_candidates++; delete p; } } if (failed_candidates >= 10000) force_exit = true; keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while( !force_exit && stop_probability((std::size_t) best_expected, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability && keep_searching); // end of generate candidate if (force_exit) { break; } if (candidates.empty()) continue; // Now get the best candidate in the current set of all candidates // Note that the function sorts the candidates: // the best candidate is always the last element of the vector Shape *best_candidate = get_best_candidate(candidates, m_num_available_points - num_invalid); // If search is done and the best candidate is too small, we are done. 
if (!keep_searching && best_candidate->m_score < m_options.min_points) break; if (!best_candidate) continue; best_candidate->m_indices.clear(); best_candidate->m_score = m_global_octree->score(best_candidate, m_shape_index, FT(3) * m_options.epsilon, m_options.normal_threshold); best_expected = static_cast<FT>(best_candidate->m_score); best_candidate->connected_component(best_candidate->m_indices, m_options.cluster_epsilon); // check score against min_points and clear out candidates if too low if (best_candidate->indices_of_assigned_points().size() < m_options.min_points) { if (!(best_candidate->indices_of_assigned_points().empty())) for (std::size_t i = 0;i < candidates.size() - 1;i++) { if (best_candidate->is_same(candidates[i])) { delete candidates[i]; candidates[i] = NULL; } } candidates.back() = NULL; delete best_candidate; best_candidate = NULL; // Trimming candidates list std::size_t empty = 0, occupied = 0; while (empty < candidates.size()) { while (empty < candidates.size() && candidates[empty]) empty++; if (empty >= candidates.size()) break; if (occupied < empty) occupied = empty + 1; while (occupied < candidates.size() && !candidates[occupied]) occupied++; if (occupied >= candidates.size()) break; candidates[empty] = candidates[occupied]; candidates[occupied] = NULL; empty++; occupied++; } candidates.resize(empty); } else if (stop_probability((std::size_t) best_candidate->expected_value(), (m_num_available_points - num_invalid), generated_candidates, m_global_octree->maxLevel()) <= m_options.probability) { // Remove candidate from list candidates.back() = NULL; //1. add best candidate to final result. m_extracted_shapes->push_back( boost::shared_ptr<Shape>(best_candidate)); //2. remove the points const std::vector<std::size_t> &indices_points_best_candidate = best_candidate->indices_of_assigned_points(); for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) { m_shape_index[indices_points_best_candidate.at(i)] = int(m_extracted_shapes->size()) - 1; num_invalid++; for (std::size_t j = 0;j<m_num_subsets;j++) { if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) { std::size_t offset = m_direct_octrees[j]->offset(); if (offset <= indices_points_best_candidate.at(i) && (indices_points_best_candidate.at(i) - offset) < m_direct_octrees[j]->size()) { m_available_octree_sizes[j]--; } } } } //2.3 Remove the points from the subtrees generated_candidates--; failed_candidates = 0; best_expected = 0; std::vector<std::size_t> subset_sizes(m_num_subsets); subset_sizes[0] = m_available_octree_sizes[0]; for (std::size_t i = 1;i<m_num_subsets;i++) { subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i]; } //3. Remove points from candidates common with extracted primitive //#pragma omp parallel for best_expected = 0; for (std::size_t i=0;i< candidates.size()-1;i++) { if (candidates[i]) { candidates[i]->update_points(m_shape_index); candidates[i]->compute_bound( subset_sizes[candidates[i]->m_nb_subset_used - 1], m_num_available_points - num_invalid); if (candidates[i]->max_bound() < m_options.min_points) { delete candidates[i]; candidates[i] = NULL; } else { best_expected = (candidates[i]->expected_value() > best_expected) ? 
candidates[i]->expected_value() : best_expected; } } } std::size_t start = 0, end = candidates.size() - 1; while (start < end) { while (candidates[start] && start < end) start++; while (!candidates[end] && start < end) end--; if (!candidates[start] && candidates[end] && start < end) { candidates[start] = candidates[end]; candidates[end] = NULL; start++; end--; } } if (candidates[end]) end++; candidates.resize(end); } keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while((keep_searching && FT(m_num_available_points - num_invalid) >= m_options.min_points) || best_expected >= m_options.min_points); // Clean up remaining candidates. for (std::size_t i = 0;i<candidates.size();i++) delete candidates[i]; candidates.resize(0); m_num_available_points -= num_invalid; return true; } /// @} /// \name Access /// @{ /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Shape>` over the detected shapes in the order of detection. Depending on the chosen probability for the detection, the shapes are ordered with decreasing size. */ Shape_range shapes() const { return Shape_range(m_extracted_shapes); } /*! Number of points not assigned to a shape. */ std::size_t number_of_unassigned_points() { return m_num_available_points; } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t` as indices into the input data that has not been assigned to a shape. */ Point_index_range indices_of_unassigned_points() { Filter_unassigned_points fup(m_shape_index); Point_index_iterator p1 = boost::make_filter_iterator<Filter_unassigned_points>( fup, boost::counting_iterator<std::size_t>(0), boost::counting_iterator<std::size_t>(m_shape_index.size())); return make_range(p1, Point_index_iterator(p1.end())); } /// @} private: int select_random_octree_level() { return (int) get_default_random()(m_global_octree->maxLevel() + 1); } Shape* get_best_candidate(std::vector<Shape* >& candidates, const std::size_t num_available_points) { if (candidates.size() == 1) return candidates.back(); int index_worse_candidate = 0; bool improved = true; while (index_worse_candidate < (int)candidates.size() - 1 && improved) { improved = false; typename Shape::Compare_by_max_bound comp; std::sort(candidates.begin() + index_worse_candidate, candidates.end(), comp); //refine the best one improve_bound(candidates.back(), num_available_points, m_num_subsets, m_options.min_points); int position_stop; //Take all those intersecting the best one, check for equal ones for (position_stop = int(candidates.size()) - 1; position_stop > index_worse_candidate; position_stop--) { if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break;//the intervals do not overlaps anymore if (candidates.at(position_stop)->max_bound() <= m_options.min_points) break; //the following candidate doesnt have enough points! 
//if we reach this point, there is an overlap // between best one and position_stop //so request refining bound on position_stop improved |= improve_bound(candidates.at(position_stop), num_available_points, m_num_subsets, m_options.min_points); //test again after refined if (candidates.back()->min_bound() > candidates.at(position_stop)->max_bound()) break;//the intervals do not overlaps anymore } index_worse_candidate = position_stop; } return candidates.back(); } bool improve_bound(Shape *candidate, std::size_t num_available_points, std::size_t max_subset, std::size_t min_points) { if (candidate->m_nb_subset_used >= max_subset) return false; if (candidate->m_nb_subset_used >= m_num_subsets) return false; candidate->m_nb_subset_used = (candidate->m_nb_subset_used >= m_num_subsets) ? m_num_subsets - 1 : candidate->m_nb_subset_used; //what it does is add another subset and recompute lower and upper bound //the next subset to include is provided by m_nb_subset_used std::size_t num_points_evaluated = 0; for (std::size_t i=0;i<candidate->m_nb_subset_used;i++) num_points_evaluated += m_available_octree_sizes[i]; // need score of new subset as well as sum of // the score of the previous considered subset std::size_t new_score = 0; std::size_t new_sampled_points = 0; do { new_score = m_direct_octrees[candidate->m_nb_subset_used]->score( candidate, m_shape_index, m_options.epsilon, m_options.normal_threshold); candidate->m_score += new_score; num_points_evaluated += m_available_octree_sizes[candidate->m_nb_subset_used]; new_sampled_points += m_available_octree_sizes[candidate->m_nb_subset_used]; candidate->m_nb_subset_used++; } while (new_sampled_points < min_points && candidate->m_nb_subset_used < m_num_subsets); candidate->m_score = candidate->m_indices.size(); candidate->compute_bound(num_points_evaluated, num_available_points); return true; } inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const { return (std::min<FT>)(std::pow((FT) 1.f - (FT) largest_candidate / FT(num_pts * octree_depth * 4), (int) num_candidates), (FT) 1); } private: Parameters m_options; // Traits class. Traits m_traits; // Octrees build on input data for quick shape evaluation and // sample selection within an octree cell. Direct_octree **m_direct_octrees; Indexed_octree *m_global_octree; std::vector<std::size_t> m_available_octree_sizes; std::size_t m_num_subsets; // maps index into points to assigned extracted primitive std::vector<int> m_shape_index; std::size_t m_num_available_points; std::size_t m_num_total_points; //give the index of the subset of point i std::vector<int> m_index_subsets; boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; std::vector<Shape *(*)()> m_shape_factories; // iterators of input data bool m_valid_iterators; Input_iterator m_input_iterator_first, m_input_iterator_beyond; Point_map m_point_pmap; Normal_map m_normal_pmap; }; } } #endif // CGAL_SHAPE_DETECTION_3_EFFICIENT_RANSAC_H
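For reference, the only difference between the two formatted variants of this file is whether the commented-out `//#pragma omp parallel for` above the candidate-update loop in detect() is retained; the loop itself ships serial in both. If that loop were ever parallelized for real, the running best_expected maximum would become a max reduction. A minimal sketch of that pattern on plain doubles (hypothetical, not CGAL code; compile with -fopenmp or equivalent):

#include <cstdio>
#include <vector>

int main()
{
  // Stand-ins for candidates[i]->expected_value().
  std::vector<double> expected_values(1000);
  for (std::size_t i = 0; i < expected_values.size(); i++)
    expected_values[i] = (double)(i % 97);

  double best_expected = 0.0;
  // OpenMP 3.1+ max reduction replaces the serial
  // "best = (x > best) ? x : best" update used in detect().
  #pragma omp parallel for reduction(max : best_expected)
  for (long i = 0; i < (long) expected_values.size(); i++)
    best_expected = (expected_values[i] > best_expected)
                      ? expected_values[i] : best_expected;

  std::printf("best expected = %f\n", best_expected); // prints 96
  return 0;
}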
GB_unaryop__lnot_int64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_uint64 // op(A') function: GB_tran__lnot_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_uint64 // op(A') function: GB_tran__lnot_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int64_uint64 // op(A') function: GB_tran__lnot_int64_uint64 // C type: int64_t // A type: uint64_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ int64_t z = (int64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int64_uint64 ( int64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
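The three copies of this kernel differ only in whether the `#pragma omp parallel for num_threads(nthreads) schedule(static)` line is present: every iteration writes a distinct Cx[p] from a distinct Ax[p], so the pragma can be stripped or kept without touching the loop body. A standalone sketch of the same parallel apply with the macros expanded by hand (the function name is illustrative):

#include <stdint.h>

/* Cx [p] = !(((int64_t) Ax [p]) != 0) for all p, split statically across
   nthreads; iterations are independent, so no synchronization is needed. */
void unop_lnot_int64_uint64_sketch (int64_t *restrict Cx,
    const uint64_t *restrict Ax, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int64_t x = (int64_t) Ax [p] ;   /* cast   */
        Cx [p] = !(x != 0) ;             /* unaryop */
    }
}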
lca_comms.h
/* //@HEADER // ***************************************************************************** // // XtraPuLP: Xtreme-Scale Graph Partitioning using Label Propagation // Copyright (2016) Sandia Corporation // // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact George M. 
Slota (gmslota@sandia.gov) // Siva Rajamanickam (srajama@sandia.gov) // Kamesh Madduri (madduri@cse.psu.edu) // // ***************************************************************************** //@HEADER */ #ifndef _LCA_COMMS_H_ #define _LCA_COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "comms.h" #include "bicc_dist.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, verify; #define MAX_SEND_SIZE 2147483648 #define LCA_THREAD_QUEUE_SIZE 6144 struct lca_thread_data_t { int32_t tid; uint64_t* thread_queue; uint64_t* thread_finish; uint64_t thread_queue_size; uint64_t thread_finish_size; }; struct lca_queue_data_t { uint64_t* queue; uint64_t* queue_next; uint64_t* finish; uint64_t queue_size; uint64_t next_size; uint64_t finish_size; uint64_t queue_length; }; inline void init_queue_lca(dist_graph_t* g, lca_queue_data_t* lcaq){ if (debug) { printf("Task %d init_queue_lca() start\n", procid);} lcaq->queue_length = g->m_local*10;//g->n_local + g->n_ghost; lcaq->queue = (uint64_t*)malloc(lcaq->queue_length*sizeof(uint64_t)); lcaq->queue_next = (uint64_t*)malloc(lcaq->queue_length*sizeof(uint64_t)); lcaq->finish = (uint64_t*)malloc(10); if (lcaq->queue == NULL || lcaq->queue_next == NULL || lcaq->finish == NULL) throw_err("init_queue_lca(), unable to allocate resources\n",procid); lcaq->queue_size = 0; lcaq->next_size = 0; lcaq->finish_size = 0; if(debug){printf("Task %d init_queue_lca() success\n", procid); } } inline void clear_queue_lca(lca_queue_data_t* lcaq){ if(debug){ printf("Task %d clear_queue_lca() start\n",procid); } free(lcaq->queue); free(lcaq->queue_next); free(lcaq->finish); if(debug) {printf("Task %d clear_queue_lca() success\n", procid); } } inline void init_thread_lca(lca_thread_data_t* lcat) { if (debug) { printf("Task %d init_thread_queue() start\n", procid);} lcat->tid = omp_get_thread_num(); lcat->thread_queue = (uint64_t*)malloc(LCA_THREAD_QUEUE_SIZE*sizeof(uint64_t)); lcat->thread_finish = (uint64_t*)malloc(LCA_THREAD_QUEUE_SIZE*sizeof(uint64_t)); if (lcat->thread_queue == NULL || lcat->thread_finish == NULL) throw_err("init_thread_lca(), unable to allocate resources\n", procid, lcat->tid); lcat->tid = omp_get_thread_num(); lcat->thread_queue_size = 0; lcat->thread_finish_size = 0; if (debug) {printf("Task %d init_thread_queue() success\n", procid); } } inline void clear_thread_lca(lca_thread_data_t* lcat){ free(lcat->thread_queue); free(lcat->thread_finish); } inline void init_sendbuf_lca(mpi_data_t* comm){ comm->sdispls_temp[0] = 0; comm->total_send = comm->sendcounts_temp[0]; for (int32_t i = 1; i < nprocs; ++i){ comm->sdispls_temp[i] = comm->sdispls_temp[i-1] + comm->sendcounts_temp[i-1]; comm->total_send += comm->sendcounts_temp[i]; } if (debug) printf("Task %d total_send %lu\n", procid, comm->total_send); comm->sendbuf_vert = (uint64_t*)malloc(comm->total_send*sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("init_sendbuf_lca(), unable to allocate resources\n", procid); } inline void clear_recvbuf_lca(mpi_data_t* comm){ free(comm->recvbuf_vert); for (int32_t i = 0; i < nprocs; ++i) comm->sendcounts[i] = 0; for (int32_t i = 0; i < nprocs; ++i) comm->sendcounts_temp[i] = 0; } inline void add_to_lca(lca_thread_data_t* lcat, lca_queue_data_t* lcaq, uint64_t vert1, uint64_t pred1, uint64_t level1, uint64_t vert2, uint64_t pred2, uint64_t level2); inline void empty_lca_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq); inline void add_to_lca_bridge( 
lca_thread_data_t* lcat, lca_queue_data_t* lcaq, uint64_t vert); inline void empty_lca_queue_bridge( lca_thread_data_t* lcat, lca_queue_data_t* lcaq); // inline void add_to_finish(lca_thread_data_t* lcat, lca_queue_data_t* lcaq, // uint64_t vert1, uint64_t pred1, uint64_t level1); // inline void empty_finish_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq); inline void update_lca_send( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank); inline void empty_lca_send( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq); inline void update_lca_send_bridge( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank); inline void empty_lca_send_bridge( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq); // inline void update_lca_finish(dist_graph_t* g, // thread_comm_t* tc, mpi_data_t* comm, // lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank); //(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, // lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank); // inline void empty_lca_finish(dist_graph_t* g, // thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq); inline void exchange_lca(dist_graph_t* g, mpi_data_t* comm); inline void add_to_lca(lca_thread_data_t* lcat, lca_queue_data_t* lcaq, uint64_t vert1, uint64_t pred1, uint64_t level1, uint64_t vert2, uint64_t pred2, uint64_t level2) { lcat->thread_queue[lcat->thread_queue_size++] = vert1; lcat->thread_queue[lcat->thread_queue_size++] = pred1; lcat->thread_queue[lcat->thread_queue_size++] = level1; lcat->thread_queue[lcat->thread_queue_size++] = vert2; lcat->thread_queue[lcat->thread_queue_size++] = pred2; lcat->thread_queue[lcat->thread_queue_size++] = level2; if (lcat->thread_queue_size+6 >= LCA_THREAD_QUEUE_SIZE) empty_lca_queue(lcat, lcaq); } inline void empty_lca_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq) { uint64_t start_offset; #pragma omp atomic capture start_offset = lcaq->next_size += lcat->thread_queue_size; start_offset -= lcat->thread_queue_size; for (uint64_t i = 0; i < lcat->thread_queue_size; ++i) lcaq->queue_next[start_offset + i] = lcat->thread_queue[i]; lcat->thread_queue_size = 0; } inline void add_to_lca_bridge( lca_thread_data_t* lcat, lca_queue_data_t* lcaq, uint64_t vert) { lcat->thread_queue[lcat->thread_queue_size++] = vert; if (lcat->thread_queue_size+1 >= LCA_THREAD_QUEUE_SIZE) empty_lca_queue_bridge(lcat, lcaq); } inline void empty_lca_queue_bridge( lca_thread_data_t* lcat, lca_queue_data_t* lcaq) { uint64_t start_offset; #pragma omp atomic capture start_offset = lcaq->next_size += lcat->thread_queue_size; start_offset -= lcat->thread_queue_size; for (uint64_t i = 0; i < lcat->thread_queue_size; ++i) lcaq->queue_next[start_offset + i] = lcat->thread_queue[i]; lcat->thread_queue_size = 0; } // inline void add_to_finish(lca_thread_data_t* lcat, lca_queue_data_t* lcaq, // uint64_t vert1, uint64_t pred1, uint64_t level1) // { // lcat->thread_finish[lcat->thread_finish_size++] = vert1; // lcat->thread_finish[lcat->thread_finish_size++] = pred1; // lcat->thread_finish[lcat->thread_finish_size++] = level1; // if (lcat->thread_finish_size+3 >= LCA_THREAD_QUEUE_SIZE) // empty_finish_queue(lcat, lcaq); // } // inline void empty_finish_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq) // { // uint64_t start_offset; // #pragma omp atomic capture // start_offset = lcaq->finish_size += lcat->thread_finish_size; // start_offset -= lcat->thread_finish_size; // for (uint64_t i = 0; i < 
lcat->thread_finish_size; ++i) // lcaq->finish[start_offset + i] = lcat->thread_finish[i]; // lcat->thread_finish_size = 0; // } inline void update_lca_send( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank) { tc->sendbuf_rank_thread[tc->thread_queue_size/6] = send_rank; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+1]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+2]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+3]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+4]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+5]; //++tc->thread_queue_size; //++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size+6 >= LCA_THREAD_QUEUE_SIZE) empty_lca_send(tc, comm, lcaq); } inline void empty_lca_send( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; i+=6) { int32_t cur_rank = tc->sendbuf_rank_thread[i/6]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]+1] = tc->sendbuf_vert_thread[i+1]; comm->sendbuf_vert[tc->thread_starts[cur_rank]+2] = tc->sendbuf_vert_thread[i+2]; comm->sendbuf_vert[tc->thread_starts[cur_rank]+3] = tc->sendbuf_vert_thread[i+3]; comm->sendbuf_vert[tc->thread_starts[cur_rank]+4] = tc->sendbuf_vert_thread[i+4]; comm->sendbuf_vert[tc->thread_starts[cur_rank]+5] = tc->sendbuf_vert_thread[i+5]; tc->thread_starts[cur_rank] += 6; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void update_lca_send_bridge( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank) { tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; if (tc->thread_queue_size+1 >= LCA_THREAD_QUEUE_SIZE) empty_lca_send_bridge(tc, comm, lcaq); } inline void empty_lca_send_bridge( thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; tc->thread_starts[cur_rank] += 1; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } // inline void update_lca_finish(dist_graph_t* g, // thread_comm_t* tc, mpi_data_t* comm, // lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank) // { // // for (int32_t i = 0; i < nprocs; ++i) // // tc->v_to_rank[i] = false; // // uint64_t out_degree = out_degree(g, vert_index); // // uint64_t* outs = out_vertices(g, vert_index); // // for (uint64_t j = 0; j < out_degree; ++j) // // { // // uint64_t out_index = outs[j]; // // if (out_index >= g->n_local) // // { // // int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; // // if (!tc->v_to_rank[out_rank]) // // { 
// // tc->v_to_rank[out_rank] = true; // // add_vid_data_to_send(tc, comm, // // g->local_unmap[vert_index], data, out_rank); // // } // // } // // } // //tc->sendbuf_rank_thread[tc->thread_queue_size/3] = send_rank; // tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index]; // tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index+1]; // tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index+2]; // //++tc->thread_queue_size; // //++tc->sendcounts_thread[send_rank]; // if (tc->thread_queue_size+6 >= LCA_THREAD_QUEUE_SIZE) // empty_lca_finish(g, tc, comm, lcaq); // } // inline void add_data_to_finish(thread_comm_t* tc, mpi_data_t* comm, // lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank) // { // tc->sendbuf_rank_thread[tc->thread_queue_size/3] = send_rank; // tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; // tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+1]; // tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+2]; // ++tc->thread_queue_size; // ++tc->sendcounts_thread[send_rank]; // if (tc->thread_queue_size+3 >= LCA_THREAD_QUEUE_SIZE) // empty_lca_finish(tc, comm, lcaq); // } // inline void empty_lca_finish(dist_graph_t* g, // thread_comm_t* tc, mpi_data_t* comm, lca_queue_data_t* lcaq) // { // for (int32_t i = 0; i < nprocs; ++i) // { // #pragma omp atomic capture // tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; // tc->thread_starts[i] -= tc->sendcounts_thread[i]; // } // for (uint64_t i = 0; i < tc->thread_queue_size; i+=3) // { // int32_t cur_rank = get_rank(g, tc->sendbuf_vert_thread[i]); // comm->sendbuf_vert[tc->thread_starts[cur_rank]] = // tc->sendbuf_vert_thread[i]; // comm->sendbuf_vert[tc->thread_starts[cur_rank]+1] = // tc->sendbuf_vert_thread[i+1]; // comm->sendbuf_vert[tc->thread_starts[cur_rank]+2] = // tc->sendbuf_vert_thread[i+2]; // tc->thread_starts[cur_rank] += 3; // } // for (int32_t i = 0; i < nprocs; ++i) // { // tc->thread_starts[i] = 0; // tc->sendcounts_thread[i] = 0; // } // tc->thread_queue_size = 0; // } inline void exchange_lca(dist_graph_t* g, mpi_data_t* comm) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; for (int32_t i = 0; i < nprocs; ++i) comm->sdispls_temp[i] -= comm->sendcounts_temp[i]; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; if (debug) printf("Task %d total_recv %lu\n", procid, comm->total_recv); comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t)); if (comm->recvbuf_vert == NULL) throw_err("exchange_lca() unable to allocate recv buffers", procid); uint64_t task_queue_size = comm->total_send; uint64_t current_global_size = 0; MPI_Allreduce(&task_queue_size, &current_global_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = current_global_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, 
MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_vert[comm->sdispls_temp[i]+j]; buf_v[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } free(comm->sendbuf_vert); assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); } #endif
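All of the queue-flush routines in this header (empty_lca_queue, empty_lca_queue_bridge, and the commented-out finish variants) use the same slot-reservation idiom: each thread buffers work locally, then uses `#pragma omp atomic capture` to claim a contiguous range of the shared queue before copying its buffer in. (Incidentally, `lcaq->finish` is allocated with `malloc(10)` — ten bytes, not ten uint64_t elements — which stays harmless only because every path that writes the finish queue is commented out.) A minimal sketch of the reservation idiom, with illustrative names:

#include <stdint.h>

/* Flush a thread-local buffer into a shared queue. The atomic capture
   bumps the shared size and hands this thread the post-update value, so
   (new_size - local_count) is the first slot it owns; ranges claimed by
   different threads can never overlap. */
void flush_local_buffer (uint64_t *shared_queue, uint64_t *shared_size,
    const uint64_t *local_buf, uint64_t local_count)
{
    uint64_t start_offset;
    #pragma omp atomic capture
    start_offset = *shared_size += local_count;
    start_offset -= local_count;   /* first slot owned by this thread */

    for (uint64_t i = 0; i < local_count; ++i)
        shared_queue[start_offset + i] = local_buf[i];
}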
#ifndef _LCA_COMMS_H_ #define _LCA_COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "comms.h" #include "bicc_dist.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, verify; #define MAX_SEND_SIZE 2147483648 #define LCA_THREAD_QUEUE_SIZE 6144 struct lca_thread_data_t { int32_t tid; uint64_t *thread_queue; uint64_t *thread_finish; uint64_t thread_queue_size; uint64_t thread_finish_size; }; struct lca_queue_data_t { uint64_t *queue; uint64_t *queue_next; uint64_t *finish; uint64_t queue_size; uint64_t next_size; uint64_t finish_size; uint64_t queue_length; }; inline void init_queue_lca(dist_graph_t * g, lca_queue_data_t * lcaq) { if (debug) { printf("Task %d init_queue_lca() start\n", procid); } lcaq->queue_length = g->m_local * 10; //g->n_local + g->n_ghost; lcaq->queue = (uint64_t *) malloc(lcaq->queue_length * sizeof(uint64_t)); lcaq->queue_next = (uint64_t *) malloc(lcaq->queue_length * sizeof(uint64_t)); lcaq->finish = (uint64_t *) malloc(10); if (lcaq->queue == NULL || lcaq->queue_next == NULL || lcaq->finish == NULL) throw_err("init_queue_lca(), unable to allocate resources\n", procid); lcaq->queue_size = 0; lcaq->next_size = 0; lcaq->finish_size = 0; if (debug) { printf("Task %d init_queue_lca() success\n", procid); } } inline void clear_queue_lca(lca_queue_data_t * lcaq) { if (debug) { printf("Task %d clear_queue_lca() start\n", procid); } free(lcaq->queue); free(lcaq->queue_next); free(lcaq->finish); if (debug) { printf("Task %d clear_queue_lca() success\n", procid); } } inline void init_thread_lca(lca_thread_data_t * lcat) { if (debug) { printf("Task %d init_thread_queue() start\n", procid); } lcat->tid = omp_get_thread_num(); lcat->thread_queue = (uint64_t *) malloc(LCA_THREAD_QUEUE_SIZE * sizeof(uint64_t)); lcat->thread_finish = (uint64_t *) malloc(LCA_THREAD_QUEUE_SIZE * sizeof(uint64_t)); if (lcat->thread_queue == NULL || lcat->thread_finish == NULL) throw_err("init_thread_lca(), unable to allocate resources\n", procid, lcat->tid); lcat->tid = omp_get_thread_num(); lcat->thread_queue_size = 0; lcat->thread_finish_size = 0; if (debug) { printf("Task %d init_thread_queue() success\n", procid); } } inline void clear_thread_lca(lca_thread_data_t * lcat) { free(lcat->thread_queue); free(lcat->thread_finish); } inline void init_sendbuf_lca(mpi_data_t * comm) { comm->sdispls_temp[0] = 0; comm->total_send = comm->sendcounts_temp[0]; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls_temp[i] = comm->sdispls_temp[i - 1] + comm->sendcounts_temp[i - 1]; comm->total_send += comm->sendcounts_temp[i]; } if (debug) printf("Task %d total_send %lu\n", procid, comm->total_send); comm->sendbuf_vert = (uint64_t *) malloc(comm->total_send * sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("init_sendbuf_lca(), unable to allocate resources\n", procid); } inline void clear_recvbuf_lca(mpi_data_t * comm) { free(comm->recvbuf_vert); for (int32_t i = 0; i < nprocs; ++i) comm->sendcounts[i] = 0; for (int32_t i = 0; i < nprocs; ++i) comm->sendcounts_temp[i] = 0; } inline void add_to_lca(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert1, uint64_t pred1, uint64_t level1, uint64_t vert2, uint64_t pred2, uint64_t level2); inline void empty_lca_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq); inline void add_to_lca_bridge( lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert); inline void empty_lca_queue_bridge( lca_thread_data_t * lcat, lca_queue_data_t * 
lcaq); //inline void add_to_finish(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, //uint64_t vert1, uint64_t pred1, uint64_t level1); //inline void empty_finish_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq); inline void update_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); inline void empty_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq); inline void update_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); inline void empty_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq); //inline void update_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); //(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); //inline void empty_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq); inline void exchange_lca(dist_graph_t * g, mpi_data_t * comm); inline void add_to_lca(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert1, uint64_t pred1, uint64_t level1, uint64_t vert2, uint64_t pred2, uint64_t level2) { lcat->thread_queue[lcat->thread_queue_size++] = vert1; lcat->thread_queue[lcat->thread_queue_size++] = pred1; lcat->thread_queue[lcat->thread_queue_size++] = level1; lcat->thread_queue[lcat->thread_queue_size++] = vert2; lcat->thread_queue[lcat->thread_queue_size++] = pred2; lcat->thread_queue[lcat->thread_queue_size++] = level2; if (lcat->thread_queue_size + 6 >= LCA_THREAD_QUEUE_SIZE) empty_lca_queue(lcat, lcaq); } inline void empty_lca_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq) { uint64_t start_offset; start_offset = lcaq->next_size += lcat->thread_queue_size; start_offset -= lcat->thread_queue_size; for (uint64_t i = 0; i < lcat->thread_queue_size; ++i) lcaq->queue_next[start_offset + i] = lcat->thread_queue[i]; lcat->thread_queue_size = 0; } inline void add_to_lca_bridge( lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert) { lcat->thread_queue[lcat->thread_queue_size++] = vert; if (lcat->thread_queue_size + 1 >= LCA_THREAD_QUEUE_SIZE) empty_lca_queue_bridge(lcat, lcaq); } inline void empty_lca_queue_bridge( lca_thread_data_t * lcat, lca_queue_data_t * lcaq) { uint64_t start_offset; start_offset = lcaq->next_size += lcat->thread_queue_size; start_offset -= lcat->thread_queue_size; for (uint64_t i = 0; i < lcat->thread_queue_size; ++i) lcaq->queue_next[start_offset + i] = lcat->thread_queue[i]; lcat->thread_queue_size = 0; } //inline void add_to_finish(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, //uint64_t vert1, uint64_t pred1, uint64_t level1) // { //lcat->thread_finish[lcat->thread_finish_size++] = vert1; //lcat->thread_finish[lcat->thread_finish_size++] = pred1; //lcat->thread_finish[lcat->thread_finish_size++] = level1; //if (lcat->thread_finish_size + 3 >= LCA_THREAD_QUEUE_SIZE) //empty_finish_queue(lcat, lcaq); // } //inline void empty_finish_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq) // { //uint64_t start_offset; // //start_offset = lcaq->finish_size += lcat->thread_finish_size; //start_offset -= lcat->thread_finish_size; //for (uint64_t i = 0; i < lcat->thread_finish_size; ++i) //lcaq->finish[start_offset + i] = lcat->thread_finish[i]; //lcat->thread_finish_size = 0; // } inline void update_lca_send( thread_comm_t * tc, mpi_data_t * comm, 
lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) { tc->sendbuf_rank_thread[tc->thread_queue_size / 6] = send_rank; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 1]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 2]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 3]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 4]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 5]; //++tc->thread_queue_size; //++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size + 6 >= LCA_THREAD_QUEUE_SIZE) empty_lca_send(tc, comm, lcaq); } inline void empty_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq) { for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; i += 6) { int32_t cur_rank = tc->sendbuf_rank_thread[i / 6]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 1] = tc->sendbuf_vert_thread[i + 1]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 2] = tc->sendbuf_vert_thread[i + 2]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 3] = tc->sendbuf_vert_thread[i + 3]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 4] = tc->sendbuf_vert_thread[i + 4]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 5] = tc->sendbuf_vert_thread[i + 5]; tc->thread_starts[cur_rank] += 6; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void update_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) { tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; if (tc->thread_queue_size + 1 >= LCA_THREAD_QUEUE_SIZE) empty_lca_send_bridge(tc, comm, lcaq); } inline void empty_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq) { for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; tc->thread_starts[cur_rank] += 1; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } //inline void update_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) // { ////for (int32_t i = 0; i < nprocs; ++i) ////tc->v_to_rank[i] = false; ////uint64_t out_degree = out_degree(g, vert_index); ////uint64_t * outs = out_vertices(g, vert_index); ////for (uint64_t j = 0; j < out_degree; ++j) //// { ////uint64_t out_index = outs[j]; ////if (out_index >= g->n_local) //// { ////int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; ////if (!tc->v_to_rank[out_rank]) //// { ////tc->v_to_rank[out_rank] = true; ////add_vid_data_to_send(tc, comm, ////g->local_unmap[vert_index], data, out_rank); //// } //// } //// } ////tc->sendbuf_rank_thread[tc->thread_queue_size / 3] = send_rank; 
//tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index + 1]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index + 2]; ////++tc->thread_queue_size; ////++tc->sendcounts_thread[send_rank]; //if (tc->thread_queue_size + 6 >= LCA_THREAD_QUEUE_SIZE) //empty_lca_finish(g, tc, comm, lcaq); // } //inline void add_data_to_finish(thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) // { //tc->sendbuf_rank_thread[tc->thread_queue_size / 3] = send_rank; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 1]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 2]; //++tc->thread_queue_size; //++tc->sendcounts_thread[send_rank]; //if (tc->thread_queue_size + 3 >= LCA_THREAD_QUEUE_SIZE) //empty_lca_finish(tc, comm, lcaq); // } //inline void empty_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq) // { //for (int32_t i = 0; i < nprocs; ++i) // { // //tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; //tc->thread_starts[i] -= tc->sendcounts_thread[i]; // } //for (uint64_t i = 0; i < tc->thread_queue_size; i += 3) // { //int32_t cur_rank = get_rank(g, tc->sendbuf_vert_thread[i]); //comm->sendbuf_vert[tc->thread_starts[cur_rank]] = //tc->sendbuf_vert_thread[i]; //comm->sendbuf_vert[tc->thread_starts[cur_rank] + 1] = //tc->sendbuf_vert_thread[i + 1]; //comm->sendbuf_vert[tc->thread_starts[cur_rank] + 2] = //tc->sendbuf_vert_thread[i + 2]; //tc->thread_starts[cur_rank] += 3; // } //for (int32_t i = 0; i < nprocs; ++i) // { //tc->thread_starts[i] = 0; //tc->sendcounts_thread[i] = 0; // } //tc->thread_queue_size = 0; // } inline void exchange_lca(dist_graph_t * g, mpi_data_t * comm) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; for (int32_t i = 0; i < nprocs; ++i) comm->sdispls_temp[i] -= comm->sendcounts_temp[i]; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; if (debug) printf("Task %d total_recv %lu\n", procid, comm->total_recv); comm->recvbuf_vert = (uint64_t *) malloc(comm->total_recv * sizeof(uint64_t)); if (comm->recvbuf_vert == NULL) throw_err("exchange_lca() unable to allocate recv buffers", procid); uint64_t task_queue_size = comm->total_send; uint64_t current_global_size = 0; MPI_Allreduce(&task_queue_size, &current_global_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = current_global_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + 
comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_v = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_vert[comm->sdispls_temp[i] + j]; buf_v[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } free(comm->sendbuf_vert); assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); } #endif
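One small building block worth isolating from init_sendbuf_lca(): send displacements are an exclusive prefix sum over the per-rank send counts, and the same scan yields total_send as a by-product. A standalone sketch of that computation (function name illustrative):

#include <stdint.h>

/* sdispls[i] = sum of sendcounts[0..i-1]; returns the total element count. */
uint64_t prefix_displs (const uint64_t *sendcounts, uint64_t *sdispls,
    int nprocs)
{
    sdispls[0] = 0;
    uint64_t total = sendcounts[0];
    for (int i = 1; i < nprocs; ++i)
    {
        sdispls[i] = sdispls[i - 1] + sendcounts[i - 1];
        total += sendcounts[i];
    }
    return total;
}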
#ifndef _LCA_COMMS_H_ #define _LCA_COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "comms.h" #include "bicc_dist.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, verify; #define MAX_SEND_SIZE 2147483648 #define LCA_THREAD_QUEUE_SIZE 6144 struct lca_thread_data_t { int32_t tid; uint64_t *thread_queue; uint64_t *thread_finish; uint64_t thread_queue_size; uint64_t thread_finish_size; }; struct lca_queue_data_t { uint64_t *queue; uint64_t *queue_next; uint64_t *finish; uint64_t queue_size; uint64_t next_size; uint64_t finish_size; uint64_t queue_length; }; inline void init_queue_lca(dist_graph_t * g, lca_queue_data_t * lcaq) { if (debug) { printf("Task %d init_queue_lca() start\n", procid); } lcaq->queue_length = g->m_local * 10; //g->n_local + g->n_ghost; lcaq->queue = (uint64_t *) malloc(lcaq->queue_length * sizeof(uint64_t)); lcaq->queue_next = (uint64_t *) malloc(lcaq->queue_length * sizeof(uint64_t)); lcaq->finish = (uint64_t *) malloc(10); if (lcaq->queue == NULL || lcaq->queue_next == NULL || lcaq->finish == NULL) throw_err("init_queue_lca(), unable to allocate resources\n", procid); lcaq->queue_size = 0; lcaq->next_size = 0; lcaq->finish_size = 0; if (debug) { printf("Task %d init_queue_lca() success\n", procid); } } inline void clear_queue_lca(lca_queue_data_t * lcaq) { if (debug) { printf("Task %d clear_queue_lca() start\n", procid); } free(lcaq->queue); free(lcaq->queue_next); free(lcaq->finish); if (debug) { printf("Task %d clear_queue_lca() success\n", procid); } } inline void init_thread_lca(lca_thread_data_t * lcat) { if (debug) { printf("Task %d init_thread_queue() start\n", procid); } lcat->tid = omp_get_thread_num(); lcat->thread_queue = (uint64_t *) malloc(LCA_THREAD_QUEUE_SIZE * sizeof(uint64_t)); lcat->thread_finish = (uint64_t *) malloc(LCA_THREAD_QUEUE_SIZE * sizeof(uint64_t)); if (lcat->thread_queue == NULL || lcat->thread_finish == NULL) throw_err("init_thread_lca(), unable to allocate resources\n", procid, lcat->tid); lcat->tid = omp_get_thread_num(); lcat->thread_queue_size = 0; lcat->thread_finish_size = 0; if (debug) { printf("Task %d init_thread_queue() success\n", procid); } } inline void clear_thread_lca(lca_thread_data_t * lcat) { free(lcat->thread_queue); free(lcat->thread_finish); } inline void init_sendbuf_lca(mpi_data_t * comm) { comm->sdispls_temp[0] = 0; comm->total_send = comm->sendcounts_temp[0]; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls_temp[i] = comm->sdispls_temp[i - 1] + comm->sendcounts_temp[i - 1]; comm->total_send += comm->sendcounts_temp[i]; } if (debug) printf("Task %d total_send %lu\n", procid, comm->total_send); comm->sendbuf_vert = (uint64_t *) malloc(comm->total_send * sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("init_sendbuf_lca(), unable to allocate resources\n", procid); } inline void clear_recvbuf_lca(mpi_data_t * comm) { free(comm->recvbuf_vert); for (int32_t i = 0; i < nprocs; ++i) comm->sendcounts[i] = 0; for (int32_t i = 0; i < nprocs; ++i) comm->sendcounts_temp[i] = 0; } inline void add_to_lca(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert1, uint64_t pred1, uint64_t level1, uint64_t vert2, uint64_t pred2, uint64_t level2); inline void empty_lca_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq); inline void add_to_lca_bridge( lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert); inline void empty_lca_queue_bridge( lca_thread_data_t * lcat, lca_queue_data_t * 
lcaq); //inline void add_to_finish(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, //uint64_t vert1, uint64_t pred1, uint64_t level1); //inline void empty_finish_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq); inline void update_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); inline void empty_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq); inline void update_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); inline void empty_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq); //inline void update_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); //(dist_graph_t * g, thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank); //inline void empty_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq); inline void exchange_lca(dist_graph_t * g, mpi_data_t * comm); inline void add_to_lca(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert1, uint64_t pred1, uint64_t level1, uint64_t vert2, uint64_t pred2, uint64_t level2) { lcat->thread_queue[lcat->thread_queue_size++] = vert1; lcat->thread_queue[lcat->thread_queue_size++] = pred1; lcat->thread_queue[lcat->thread_queue_size++] = level1; lcat->thread_queue[lcat->thread_queue_size++] = vert2; lcat->thread_queue[lcat->thread_queue_size++] = pred2; lcat->thread_queue[lcat->thread_queue_size++] = level2; if (lcat->thread_queue_size + 6 >= LCA_THREAD_QUEUE_SIZE) empty_lca_queue(lcat, lcaq); } inline void empty_lca_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq) { uint64_t start_offset; #pragma omp atomic capture start_offset = lcaq->next_size += lcat->thread_queue_size; start_offset -= lcat->thread_queue_size; for (uint64_t i = 0; i < lcat->thread_queue_size; ++i) lcaq->queue_next[start_offset + i] = lcat->thread_queue[i]; lcat->thread_queue_size = 0; } inline void add_to_lca_bridge( lca_thread_data_t * lcat, lca_queue_data_t * lcaq, uint64_t vert) { lcat->thread_queue[lcat->thread_queue_size++] = vert; if (lcat->thread_queue_size + 1 >= LCA_THREAD_QUEUE_SIZE) empty_lca_queue_bridge(lcat, lcaq); } inline void empty_lca_queue_bridge( lca_thread_data_t * lcat, lca_queue_data_t * lcaq) { uint64_t start_offset; #pragma omp atomic capture start_offset = lcaq->next_size += lcat->thread_queue_size; start_offset -= lcat->thread_queue_size; for (uint64_t i = 0; i < lcat->thread_queue_size; ++i) lcaq->queue_next[start_offset + i] = lcat->thread_queue[i]; lcat->thread_queue_size = 0; } //inline void add_to_finish(lca_thread_data_t * lcat, lca_queue_data_t * lcaq, //uint64_t vert1, uint64_t pred1, uint64_t level1) // { //lcat->thread_finish[lcat->thread_finish_size++] = vert1; //lcat->thread_finish[lcat->thread_finish_size++] = pred1; //lcat->thread_finish[lcat->thread_finish_size++] = level1; //if (lcat->thread_finish_size + 3 >= LCA_THREAD_QUEUE_SIZE) //empty_finish_queue(lcat, lcaq); // } //inline void empty_finish_queue(lca_thread_data_t * lcat, lca_queue_data_t * lcaq) // { //uint64_t start_offset; // #pragma omp atomic capture //start_offset = lcaq->finish_size += lcat->thread_finish_size; //start_offset -= lcat->thread_finish_size; //for (uint64_t i = 0; i < lcat->thread_finish_size; ++i) //lcaq->finish[start_offset + i] = lcat->thread_finish[i]; //lcat->thread_finish_size = 
0; // } inline void update_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) { tc->sendbuf_rank_thread[tc->thread_queue_size / 6] = send_rank; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 1]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 2]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 3]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 4]; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 5]; //++tc->thread_queue_size; //++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size + 6 >= LCA_THREAD_QUEUE_SIZE) empty_lca_send(tc, comm, lcaq); } inline void empty_lca_send( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; i += 6) { int32_t cur_rank = tc->sendbuf_rank_thread[i / 6]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 1] = tc->sendbuf_vert_thread[i + 1]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 2] = tc->sendbuf_vert_thread[i + 2]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 3] = tc->sendbuf_vert_thread[i + 3]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 4] = tc->sendbuf_vert_thread[i + 4]; comm->sendbuf_vert[tc->thread_starts[cur_rank] + 5] = tc->sendbuf_vert_thread[i + 5]; tc->thread_starts[cur_rank] += 6; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void update_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) { tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; if (tc->thread_queue_size + 1 >= LCA_THREAD_QUEUE_SIZE) empty_lca_send_bridge(tc, comm, lcaq); } inline void empty_lca_send_bridge( thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; tc->thread_starts[cur_rank] += 1; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } //inline void update_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) // { ////for (int32_t i = 0; i < nprocs; ++i) ////tc->v_to_rank[i] = false; ////uint64_t out_degree = out_degree(g, vert_index); ////uint64_t * outs = out_vertices(g, vert_index); ////for (uint64_t j = 0; j < out_degree; ++j) //// { ////uint64_t out_index = outs[j]; ////if (out_index >= g->n_local) //// { ////int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; ////if (!tc->v_to_rank[out_rank]) //// { ////tc->v_to_rank[out_rank] = true; ////add_vid_data_to_send(tc, comm, 
////g->local_unmap[vert_index], data, out_rank); //// } //// } //// } ////tc->sendbuf_rank_thread[tc->thread_queue_size / 3] = send_rank; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index + 1]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index + 2]; ////++tc->thread_queue_size; ////++tc->sendcounts_thread[send_rank]; //if (tc->thread_queue_size + 6 >= LCA_THREAD_QUEUE_SIZE) //empty_lca_finish(g, tc, comm, lcaq); // } //inline void add_data_to_finish(thread_comm_t * tc, mpi_data_t * comm, //lca_queue_data_t * lcaq, uint64_t index, int32_t send_rank) // { //tc->sendbuf_rank_thread[tc->thread_queue_size / 3] = send_rank; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 1]; //tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index + 2]; //++tc->thread_queue_size; //++tc->sendcounts_thread[send_rank]; //if (tc->thread_queue_size + 3 >= LCA_THREAD_QUEUE_SIZE) //empty_lca_finish(tc, comm, lcaq); // } //inline void empty_lca_finish(dist_graph_t * g, //thread_comm_t * tc, mpi_data_t * comm, lca_queue_data_t * lcaq) // { //for (int32_t i = 0; i < nprocs; ++i) // { // #pragma omp atomic capture //tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i]; //tc->thread_starts[i] -= tc->sendcounts_thread[i]; // } //for (uint64_t i = 0; i < tc->thread_queue_size; i += 3) // { //int32_t cur_rank = get_rank(g, tc->sendbuf_vert_thread[i]); //comm->sendbuf_vert[tc->thread_starts[cur_rank]] = //tc->sendbuf_vert_thread[i]; //comm->sendbuf_vert[tc->thread_starts[cur_rank] + 1] = //tc->sendbuf_vert_thread[i + 1]; //comm->sendbuf_vert[tc->thread_starts[cur_rank] + 2] = //tc->sendbuf_vert_thread[i + 2]; //tc->thread_starts[cur_rank] += 3; // } //for (int32_t i = 0; i < nprocs; ++i) // { //tc->thread_starts[i] = 0; //tc->sendcounts_thread[i] = 0; // } //tc->thread_queue_size = 0; // } inline void exchange_lca(dist_graph_t * g, mpi_data_t * comm) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; for (int32_t i = 0; i < nprocs; ++i) comm->sdispls_temp[i] -= comm->sendcounts_temp[i]; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; if (debug) printf("Task %d total_recv %lu\n", procid, comm->total_recv); comm->recvbuf_vert = (uint64_t *) malloc(comm->total_recv * sizeof(uint64_t)); if (comm->recvbuf_vert == NULL) throw_err("exchange_lca() unable to allocate recv buffers", procid); uint64_t task_queue_size = comm->total_send; uint64_t current_global_size = 0; MPI_Allreduce(&task_queue_size, &current_global_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = current_global_size / (uint64_t) MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t) (send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; 
comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i - 1] + comm->sendcounts[i - 1]; comm->rdispls[i] = comm->rdispls[i - 1] + comm->recvcounts[i - 1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs - 1] + comm->sendcounts[nprocs - 1]; int32_t cur_recv = comm->rdispls[nprocs - 1] + comm->recvcounts[nprocs - 1]; uint64_t *buf_v = (uint64_t *) malloc((uint64_t) (cur_send) * sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms - 1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_vert[comm->sdispls_temp[i] + j]; buf_v[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert + sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } free(comm->sendbuf_vert); assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); } #endif
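exchange_lca() has one wrinkle worth noting: MPI_Alltoallv takes int counts, so the routine first measures the global send volume with an MPI_Allreduce, splits the exchange into num_comms = global_size / MAX_SEND_SIZE + 1 rounds, and in each round sends a proportional slice of every rank's data. A reduced sketch of just the slicing arithmetic (no communication, names illustrative):

#include <stdint.h>
#include <assert.h>

/* Round c of num_comms sends rank data [send_begin, send_end): contiguous
   proportional slices that cover each per-rank count exactly once across
   all rounds, keeping every per-round count within MPI's int range. */
void round_slice (uint64_t count, uint64_t c, uint64_t num_comms,
    uint64_t *send_begin, uint64_t *send_end)
{
    *send_begin = (count * c) / num_comms;
    *send_end   = (count * (c + 1)) / num_comms;
    if (c == num_comms - 1)
        *send_end = count;   /* last round picks up any truncation remainder */
    assert (*send_end >= *send_begin);
}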
convolution_1x1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _pn = vld1q_f32(r0+4); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); float32x4_t _p1 = vld1q_f32(r1); float32x4_t _p1n = vld1q_f32(r1+4); _outp = vfmaq_f32(_outp, _p1, _k1); _outpn = vfmaq_f32(_outpn, _p1n, _k1); float32x4_t _p2 = vld1q_f32(r2); float32x4_t _p2n = vld1q_f32(r2+4); _outp = vfmaq_f32(_outp, _p2, _k2); _outpn = vfmaq_f32(_outpn, _p2n, _k2); float32x4_t _p3 = vld1q_f32(r3); float32x4_t _p3n = vld1q_f32(r3+4); _outp = vfmaq_f32(_outp, _p3, _k3); _outpn = vfmaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 8; r1 += 8; r2 += 8; r3 += 8; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else int remain = size; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn>0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _pn = vld1q_f32(r0+4); float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 8; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q+3<inch; q+=4) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* img1 = bottom_blob.channel(q+1); const float* img2 = bottom_blob.channel(q+2); const float* img3 = bottom_blob.channel(q+3); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float* r0 = img0; const float* r1 = img1; const float* r2 = img2; const float* r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0+8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); float32x4x2_t _p1x2 = vld2q_f32(r1); float32x4_t _p1 = _p1x2.val[0]; float32x4x2_t _p1nx2 = vld2q_f32(r1+8); float32x4_t _p1n = _p1nx2.val[0]; _outp = vmlaq_f32(_outp, _p1, _k1); _outpn = vmlaq_f32(_outpn, _p1n, _k1); float32x4x2_t _p2x2 = vld2q_f32(r2); float32x4_t _p2 = _p2x2.val[0]; float32x4x2_t _p2nx2 = vld2q_f32(r2+8); float32x4_t _p2n = _p2nx2.val[0]; _outp = vmlaq_f32(_outp, _p2, _k2); _outpn = vmlaq_f32(_outpn, _p2n, _k2); float32x4x2_t _p3x2 = vld2q_f32(r3); float32x4_t _p3 = _p3x2.val[0]; float32x4x2_t _p3nx2 = vld2q_f32(r3+8); float32x4_t _p3n = _p3nx2.val[0]; _outp = vmlaq_f32(_outp, _p3, _k3); _outpn = vmlaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 16; r1 += 16; r2 += 16; r3 += 16; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), // %12 "w"(_k1), // %13 "w"(_k2), // %14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q<inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch + q; const float k0 = kernel0[0]; const float* r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn>0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0+8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr+4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr+4, _outpn); r0 += 16; outptr += 8; } #else if (nn > 0) { asm volatile( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! \n" "bne 0b \n" "sub %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif /* // __ARM_NEON */ static void conv1x1s1_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Mat & _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float *kernel = _kernel; const float *bias = _bias; for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *img1 = bottom_blob.channel(q + 1); const float *img2 = bottom_blob.channel(q + 2); const float *img3 = bottom_blob.channel(q + 3); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float *r0 = img0; const float *r1 = img1; const float *r2 = img2; const float *r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else /* */ int remain = size; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _pn = vld1q_f32(r0 + 4); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); float32x4_t _p1 = vld1q_f32(r1); float32x4_t _p1n = vld1q_f32(r1 + 4); _outp = vfmaq_f32(_outp, _p1, _k1); _outpn = vfmaq_f32(_outpn, _p1n, _k1); float32x4_t _p2 = vld1q_f32(r2); float32x4_t _p2n = vld1q_f32(r2 + 4); _outp = vfmaq_f32(_outp, _p2, _k2); _outpn = vfmaq_f32(_outpn, _p2n, _k2); float32x4_t _p3 = vld1q_f32(r3); float32x4_t _p3n = vld1q_f32(r3 + 4); _outp = vfmaq_f32(_outp, _p3, _k3); _outpn = vfmaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 8; r1 += 8; r2 += 8; r3 += 8; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0), //%2 "=r"(r1), //%3 "=r"(r2), //%4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), //%12 "w"(_k1), //%13 "w"(_k2), //%14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q < inch; q++) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float *r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else /* */ int remain = size; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _pn = vld1q_f32(r0 + 4); float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 8; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Mat & _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float *kernel = _kernel; const float *bias = _bias; for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0. 
f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *img1 = bottom_blob.channel(q + 1); const float *img2 = bottom_blob.channel(q + 2); const float *img3 = bottom_blob.channel(q + 3); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float *r0 = img0; const float *r1 = img1; const float *r2 = img2; const float *r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else /* */ int remain = outw; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn > 0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0 + 8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); float32x4x2_t _p1x2 = vld2q_f32(r1); float32x4_t _p1 = _p1x2.val[0]; float32x4x2_t _p1nx2 = vld2q_f32(r1 + 8); float32x4_t _p1n = _p1nx2.val[0]; _outp = vmlaq_f32(_outp, _p1, _k1); _outpn = vmlaq_f32(_outpn, _p1n, _k1); float32x4x2_t _p2x2 = vld2q_f32(r2); float32x4_t _p2 = _p2x2.val[0]; float32x4x2_t _p2nx2 = vld2q_f32(r2 + 8); float32x4_t _p2n = _p2nx2.val[0]; _outp = vmlaq_f32(_outp, _p2, _k2); _outpn = vmlaq_f32(_outpn, _p2n, _k2); float32x4x2_t _p3x2 = vld2q_f32(r3); float32x4_t _p3 = _p3x2.val[0]; float32x4x2_t _p3nx2 = vld2q_f32(r3 + 8); float32x4_t _p3n = _p3nx2.val[0]; _outp = vmlaq_f32(_outp, _p3, _k3); _outpn = vmlaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 16; r1 += 16; r2 += 16; r3 += 16; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0), //%2 "=r"(r1), //%3 "=r"(r2), //%4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), //%12 "w"(_k1), //%13 "w"(_k2), //%14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q < inch; q++) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float *r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else /* */ int remain = outw; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn > 0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0 + 8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 16; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! \n" "bne 0b \n" "sub %2, #64 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif /* // __ARM_NEON */ static void conv1x1s1_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Mat & _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *img1 = bottom_blob.channel(q + 1); const float *img2 = bottom_blob.channel(q + 2); const float *img3 = bottom_blob.channel(q + 3); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float *r0 = img0; const float *r1 = img1; const float *r2 = img2; const float *r3 = img3; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else /* */ int remain = size; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _pn = vld1q_f32(r0 + 4); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); float32x4_t _p1 = vld1q_f32(r1); float32x4_t _p1n = vld1q_f32(r1 + 4); _outp = vfmaq_f32(_outp, _p1, _k1); _outpn = vfmaq_f32(_outpn, _p1n, _k1); float32x4_t _p2 = vld1q_f32(r2); float32x4_t _p2n = vld1q_f32(r2 + 4); _outp = vfmaq_f32(_outp, _p2, _k2); _outpn = vfmaq_f32(_outpn, _p2n, _k2); float32x4_t _p3 = vld1q_f32(r3); float32x4_t _p3n = vld1q_f32(r3 + 4); _outp = vfmaq_f32(_outp, _p3, _k3); _outpn = vfmaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 8; r1 += 8; r2 += 8; r3 += 8; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q3, %q12 \n" "pld [%3, #256] \n" "vld1.f32 {d4-d7}, [%3 :128]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q3, %q13 \n" "pld [%4, #256] \n" "vld1.f32 {d4-d7}, [%4 :128]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q3, %q14 \n" "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q3, %q15 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" "bne 0b \n" "sub %2, #32 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0), //%2 "=r"(r1), //%3 "=r"(r2), //%4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), //%12 "w"(_k1), //%13 "w"(_k2), //%14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0++; r1++; r2++; r3++; outptr++; } } for (; q < inch; q++) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float *r0 = img0; int size = outw * outh; #if __ARM_NEON int nn = size >> 3; int remain = size & 7; #else /* */ int remain = size; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn > 0; nn--) { float32x4_t _p = vld1q_f32(r0); float32x4_t _outp = vld1q_f32(outptr); float32x4_t _pn = vld1q_f32(r0 + 4); float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vfmaq_f32(_outp, _p, _k0); _outpn = vfmaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 8; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q3, %q6 \n" "pld [%2, #256] \n" "vld1.f32 {d4-d7}, [%2 :128]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0++; outptr++; } } } } static void conv1x1s2_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & _kernel, const Mat & _bias) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float *kernel = _kernel; const float *bias = _bias; #pragma omp parallel for for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0. 
f; out.fill(bias0); int q = 0; for (; q + 3 < inch; q += 4) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *img1 = bottom_blob.channel(q + 1); const float *img2 = bottom_blob.channel(q + 2); const float *img3 = bottom_blob.channel(q + 3); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float k1 = kernel0[1]; const float k2 = kernel0[2]; const float k3 = kernel0[3]; const float *r0 = img0; const float *r1 = img1; const float *r2 = img2; const float *r3 = img3; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else /* */ int remain = outw; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); float32x4_t _k1 = vdupq_n_f32(k1); float32x4_t _k2 = vdupq_n_f32(k2); float32x4_t _k3 = vdupq_n_f32(k3); #if __aarch64__ for (; nn > 0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0 + 8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); float32x4x2_t _p1x2 = vld2q_f32(r1); float32x4_t _p1 = _p1x2.val[0]; float32x4x2_t _p1nx2 = vld2q_f32(r1 + 8); float32x4_t _p1n = _p1nx2.val[0]; _outp = vmlaq_f32(_outp, _p1, _k1); _outpn = vmlaq_f32(_outpn, _p1n, _k1); float32x4x2_t _p2x2 = vld2q_f32(r2); float32x4_t _p2 = _p2x2.val[0]; float32x4x2_t _p2nx2 = vld2q_f32(r2 + 8); float32x4_t _p2n = _p2nx2.val[0]; _outp = vmlaq_f32(_outp, _p2, _k2); _outpn = vmlaq_f32(_outpn, _p2n, _k2); float32x4x2_t _p3x2 = vld2q_f32(r3); float32x4_t _p3 = _p3x2.val[0]; float32x4x2_t _p3nx2 = vld2q_f32(r3 + 8); float32x4_t _p3n = _p3nx2.val[0]; _outp = vmlaq_f32(_outp, _p3, _k3); _outpn = vmlaq_f32(_outpn, _p3n, _k3); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 16; r1 += 16; r2 += 16; r3 += 16; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q12 \n" "vmla.f32 q1, q8, %q12 \n" "pld [%3, #512] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vld2.f32 {d16-d19}, [%3]! \n" "vmla.f32 q0, q2, %q13 \n" "vmla.f32 q1, q8, %q13 \n" "pld [%4, #512] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vld2.f32 {d16-d19}, [%4]! \n" "vmla.f32 q0, q2, %q14 \n" "vmla.f32 q1, q8, %q14 \n" "pld [%5, #512] \n" "vld2.f32 {d4-d7}, [%5]! \n" "vld2.f32 {d16-d19}, [%5]! \n" "vmla.f32 q0, q2, %q15 \n" "vmla.f32 q1, q8, %q15 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! 
\n" "bne 0b \n" "sub %2, #64 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0), //%2 "=r"(r1), //%3 "=r"(r2), //%4 "=r"(r3) // %5 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k0), //%12 "w"(_k1), //%13 "w"(_k2), //%14 "w"(_k3) // %15 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; float sum1 = *r1 * k1; float sum2 = *r2 * k2; float sum3 = *r3 * k3; *outptr += sum + sum1 + sum2 + sum3; r0 += 2; r1 += 2; r2 += 2; r3 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; } } for (; q < inch; q++) { float *outptr = out; const float *img0 = bottom_blob.channel(q); const float *kernel0 = kernel + p * inch + q; const float k0 = kernel0[0]; const float *r0 = img0; for (int i = 0; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else /* */ int remain = outw; #endif /* // __ARM_NEON */ #if __ARM_NEON float32x4_t _k0 = vdupq_n_f32(k0); #if __aarch64__ for (; nn > 0; nn--) { float32x4x2_t _px2 = vld2q_f32(r0); float32x4_t _p = _px2.val[0]; float32x4_t _outp = vld1q_f32(outptr); float32x4x2_t _pnx2 = vld2q_f32(r0 + 8); float32x4_t _pn = _pnx2.val[0]; float32x4_t _outpn = vld1q_f32(outptr + 4); _outp = vmlaq_f32(_outp, _p, _k0); _outpn = vmlaq_f32(_outpn, _pn, _k0); vst1q_f32(outptr, _outp); vst1q_f32(outptr + 4, _outpn); r0 += 16; outptr += 8; } #else /* */ if (nn > 0) { asm volatile ( "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "0: \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1] \n" "vmla.f32 q0, q2, %q6 \n" "vmla.f32 q1, q8, %q6 \n" "pld [%2, #512] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vld2.f32 {d16-d19}, [%2]! \n" "subs %0, #1 \n" "vst1.f32 {d0-d3}, [%1]! \n" "bne 0b \n" "sub %2, #64 \n" : "=r" (nn), //%0 "=r"(outptr), //%1 "=r"(r0) // %2 : "0"(nn), "1"(outptr), "2"(r0), "w"(_k0) // %6 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9" ); } #endif /* // __aarch64__ */ #endif /* // __ARM_NEON */ for (; remain > 0; remain--) { float sum = *r0 * k0; *outptr += sum; r0 += 2; outptr++; } r0 += tailstep; } } } }
main.c
/// /// @copyright Copyright (c) 2013-2016, Université Pierre et Marie Curie /// All rights reserved. /// /// <b>hiCL</b> is owned by Université Pierre et Marie Curie (UPMC), /// funded by TOTAL, and written by Issam SAID <said.issam@gmail.com>. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// 1. Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// 2. Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// 3. Neither the name of the UPMC nor the names of its contributors /// may be used to endorse or promote products derived from this software /// without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, /// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UPMC OR /// ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, /// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, /// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR /// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF /// LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING /// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS /// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// @file sgemm/main.c /// @author Issam SAID /// @brief An example of matrix to matrix multiplication code based on /// the hiCL C/C++ interface. /// #include <stdio.h> #include <time.h> #include <string.h> #include <hiCL/hiCL.h> #define N 1024 /// /// @brief The main program of the hiCL based sgemm C/C++ example. /// /// This is the main routine that shows how to use the hiCL C/C++ interface /// to implement a simple matrix to matrix multiplication. /// Note that the OpenCL kernel is implemented in a seperate file (sgemm.cl). /// @return Error code if any. /// int main(void) { unsigned int i; float *a; float *b; float *c; size_t global[3] = {N, N, 1}; size_t local[3] = {16, 16, 1}; hidev_t device; fprintf(stdout, "... start of the hiCL sgemm C/C++ example\n"); /// ///< Initialize hiCL with selecting the default GPU. /// hicl_init(GPU); /// ///< Load and build the OpenCL kernel that runs the multiplication. ///< Note the '-cl-kernel-arg-info' that is added for NVIDIA GPUs to ///< use some OpenCL 1.2 features. /// hicl_load(PREFIX"/sgemm.cl", NULL); /// ///< Get a pointer to the desired device (in this case the default GPU). /// device = hicl_dev_find(DEFAULT); a = (float*)malloc(N*N*sizeof(float)); b = (float*)malloc(N*N*sizeof(float)); c = (float*)malloc(N*N*sizeof(float)); memset(c, 0, N*N*sizeof(float)); srand (time(NULL)); #pragma omp parallel for private(i) for (i = 0; i< N*N; ++i) a[i] = i%2 == 0 ? -rand()%10 : rand()%10; #pragma omp parallel for private(i) for (i = 0; i< N*N; ++i) b[i] = 1; /// ///< Wrap the matrices into hiCL memory objects. /// hicl_mem_wrap(device, a, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, b, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, c, N*N, FLOAT | READ_WRITE | HWA); /// ///< Set the work size and the dimensions of the kernel. 
/// hicl_knl_set_wrk("sgemm", 2, global, local); /// ///< Run the kernel on the default GPU. /// hicl_knl_run("sgemm", device, a, b, c, N); /// ///< Update the C matrix on the CPU side so that the results can be seen ///< on the host side. /// hicl_mem_update(c, READ_ONLY); free(a); free(b); free(c); /// ///< Release hiCL resources. /// hicl_release(); fprintf(stdout, "... end of the hiCL sgemm C/C++ example\n"); return EXIT_SUCCESS; }
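One caveat about the initialization loops above: rand() keeps shared hidden state and is not guaranteed to be thread-safe, so calling it inside #pragma omp parallel for is a data race (usually harmless for test data, but the values are then not reproducible). A hedged sketch of the usual fix with a per-thread POSIX rand_r() seed; the function name is hypothetical:

#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Fill a[0..n-1] with small signed test values without sharing RNG state. */
static void fill_random(float *a, size_t n, unsigned int base_seed)
{
    #pragma omp parallel
    {
        unsigned int seed = base_seed;
    #ifdef _OPENMP
        seed += (unsigned int) omp_get_thread_num(); /* de-correlate threads */
    #endif
        #pragma omp for
        for (size_t i = 0; i < n; ++i)
            a[i] = (i % 2 == 0) ? -(float) (rand_r(&seed) % 10)
                                : (float) (rand_r(&seed) % 10);
    }
}

Called as fill_random(a, (size_t) N * N, (unsigned int) time(NULL)), it would replace both the srand() call and the first parallel loop in the example.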
/// /// @copyright Copyright (c) 2013-2016, Université Pierre et Marie Curie /// All rights reserved. /// /// <b>hiCL</b> is owned by Université Pierre et Marie Curie (UPMC), /// funded by TOTAL, and written by Issam SAID <said.issam@gmail.com>. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// 1. Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// 2. Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// 3. Neither the name of the UPMC nor the names of its contributors /// may be used to endorse or promote products derived from this software /// without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, /// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UPMC OR /// ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, /// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, /// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR /// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF /// LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING /// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS /// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// @file sgemm/main.c /// @author Issam SAID /// @brief An example of matrix to matrix multiplication code based on /// the hiCL C/C++ interface. /// #include <stdio.h> #include <time.h> #include <string.h> #include <hiCL/hiCL.h> #define N 1024 /// /// @brief The main program of the hiCL based sgemm C/C++ example. /// /// This is the main routine that shows how to use the hiCL C/C++ interface /// to implement a simple matrix to matrix multiplication. /// Note that the OpenCL kernel is implemented in a seperate file (sgemm.cl). /// @return Error code if any. /// int main(void) { unsigned int i; float *a; float *b; float *c; size_t global[3] = {N, N, 1}; size_t local[3] = {16, 16, 1}; hidev_t device; fprintf(stdout, "... start of the hiCL sgemm C/C++ example\n"); /// ///< Initialize hiCL with selecting the default GPU. /// hicl_init(GPU); /// ///< Load and build the OpenCL kernel that runs the multiplication. ///< Note the '-cl-kernel-arg-info' that is added for NVIDIA GPUs to ///< use some OpenCL 1.2 features. /// hicl_load(PREFIX"/sgemm.cl", NULL); /// ///< Get a pointer to the desired device (in this case the default GPU). /// device = hicl_dev_find(DEFAULT); a = (float*)malloc(N*N*sizeof(float)); b = (float*)malloc(N*N*sizeof(float)); c = (float*)malloc(N*N*sizeof(float)); memset(c, 0, N*N*sizeof(float)); srand (time(NULL)); for (i = 0; i< N*N; ++i) a[i] = i%2 == 0 ? -rand()%10 : rand()%10; for (i = 0; i< N*N; ++i) b[i] = 1; /// ///< Wrap the matrices into hiCL memory objects. /// hicl_mem_wrap(device, a, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, b, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, c, N*N, FLOAT | READ_WRITE | HWA); /// ///< Set the work size and the dimensions of the kernel. /// hicl_knl_set_wrk("sgemm", 2, global, local); /// ///< Run the kernel on the default GPU. 
/// hicl_knl_run("sgemm", device, a, b, c, N); /// ///< Update the C matrix on the CPU side so that the results can be seen ///< on the host side. /// hicl_mem_update(c, READ_ONLY); free(a); free(b); free(c); /// ///< Release hiCL resources. /// hicl_release(); fprintf(stdout, "... end of the hiCL sgemm C/C++ example\n"); return EXIT_SUCCESS; }
/// /// @copyright Copyright (c) 2013-2016, Université Pierre et Marie Curie /// All rights reserved. /// /// <b>hiCL</b> is owned by Université Pierre et Marie Curie (UPMC), /// funded by TOTAL, and written by Issam SAID <said.issam@gmail.com>. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// 1. Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// 2. Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// 3. Neither the name of the UPMC nor the names of its contributors /// may be used to endorse or promote products derived from this software /// without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, /// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UPMC OR /// ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, /// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, /// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR /// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF /// LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING /// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS /// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// @file sgemm/main.c /// @author Issam SAID /// @brief An example of matrix to matrix multiplication code based on /// the hiCL C/C++ interface. /// #include <stdio.h> #include <time.h> #include <string.h> #include <hiCL/hiCL.h> #define N 1024 /// /// @brief The main program of the hiCL based sgemm C/C++ example. /// /// This is the main routine that shows how to use the hiCL C/C++ interface /// to implement a simple matrix to matrix multiplication. /// Note that the OpenCL kernel is implemented in a seperate file (sgemm.cl). /// @return Error code if any. /// int main(void) { unsigned int i; float *a; float *b; float *c; size_t global[3] = {N, N, 1}; size_t local[3] = {16, 16, 1}; hidev_t device; fprintf(stdout, "... start of the hiCL sgemm C/C++ example\n"); /// ///< Initialize hiCL with selecting the default GPU. /// hicl_init(GPU); /// ///< Load and build the OpenCL kernel that runs the multiplication. ///< Note the '-cl-kernel-arg-info' that is added for NVIDIA GPUs to ///< use some OpenCL 1.2 features. /// hicl_load(PREFIX"/sgemm.cl", NULL); /// ///< Get a pointer to the desired device (in this case the default GPU). /// device = hicl_dev_find(DEFAULT); a = (float*)malloc(N*N*sizeof(float)); b = (float*)malloc(N*N*sizeof(float)); c = (float*)malloc(N*N*sizeof(float)); memset(c, 0, N*N*sizeof(float)); srand (time(NULL)); #pragma omp parallel for private(i) for (i = 0; i< N*N; ++i) a[i] = i%2 == 0 ? -rand()%10 : rand()%10; #pragma omp parallel for private(i) for (i = 0; i< N*N; ++i) b[i] = 1; /// ///< Wrap the matrices into hiCL memory objects. /// hicl_mem_wrap(device, a, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, b, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, c, N*N, FLOAT | READ_WRITE | HWA); /// ///< Set the work size and the dimensions of the kernel. 
/// hicl_knl_set_wrk("sgemm", 2, global, local); /// ///< Run the kernel on the default GPU. /// hicl_knl_run("sgemm", device, a, b, c, N); /// ///< Update the C matrix on the CPU side so that the results can be seen ///< on the host side. /// hicl_mem_update(c, READ_ONLY); free(a); free(b); free(c); /// ///< Release hiCL resources. /// hicl_release(); fprintf(stdout, "... end of the hiCL sgemm C/C++ example\n"); return EXIT_SUCCESS; }
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short int type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[256], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op) { const char *blend_mode; switch (op) { case ColorBurnCompositeOp: blend_mode = "idiv"; break; case ColorDodgeCompositeOp: blend_mode = "div "; break; case ColorizeCompositeOp: blend_mode = "colr"; break; case DarkenCompositeOp: blend_mode = "dark"; break; case DifferenceCompositeOp: blend_mode = "diff"; break; case DissolveCompositeOp: blend_mode = "diss"; break; case ExclusionCompositeOp: blend_mode = "smud"; break; case HardLightCompositeOp: blend_mode = "hLit"; break; case HardMixCompositeOp: blend_mode = "hMix"; break; case HueCompositeOp: blend_mode = "hue "; break; case LightenCompositeOp: blend_mode = "lite"; break; case LinearBurnCompositeOp: blend_mode = "lbrn"; break; case LinearDodgeCompositeOp:blend_mode = "lddg"; break; case LinearLightCompositeOp:blend_mode = "lLit"; break; case LuminizeCompositeOp: blend_mode = "lum "; break; case MultiplyCompositeOp: blend_mode = "mul "; break; case OverCompositeOp: blend_mode = "norm"; break; case OverlayCompositeOp: blend_mode = "over"; break; case PinLightCompositeOp: blend_mode = "pLit"; break; case SaturateCompositeOp: blend_mode = "sat "; break; case ScreenCompositeOp: blend_mode = "scrn"; break; case SoftLightCompositeOp: blend_mode = "sLit"; break; case VividLightCompositeOp: blend_mode = "vLit"; break; default: blend_mode = "norm"; } return(blend_mode); } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. 
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); image->alpha_trait=BlendPixelTrait; status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue, exception); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=background; SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { 
complete_mask=DestroyImage(complete_mask); return(status); } image->alpha_trait=BlendPixelTrait; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); else if (image->depth > 8) return(2); } else if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static void ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image,ExceptionInfo *exception) { const unsigned char *p; StringInfo *profile; unsigned int count, 
long_sans; unsigned short id, short_sans; if (length < 16) return; profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); (void) SetImageProfile(image,"8bim",profile,exception); profile=DestroyStringInfo(profile); for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if ((p+count) > (blocks+length-16)) return; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. */ p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if (*(p+4) == 0) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return; } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ 
*q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { if (packet_size == 1) SetPixelIndex(image,ScaleQuantumToChar(pixel),q); else SetPixelIndex(image,ScaleQuantumToShort(pixel),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) ConstrainColormapIndex(image,GetPixelIndex(image,q),exception),q); return; } switch (type) { case -1: { SetPixelAlpha(image, pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); if (channels == 1 || type == -2) SetPixelGray(image,pixel,q); break; } case 1: { if (image->storage_class == PseudoClass) SetPixelAlpha(image,pixel,q); else SetPixelGreen(image,pixel,q); break; } case 2: { if (image->storage_class == PseudoClass) SetPixelAlpha(image,pixel,q); else SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const size_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > row_size + 256) // arbitrary number { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
      123456 : image->depth),row_size,pixels); /* 123456 is a sentinel depth
        so the RLE decoder leaves 1-bit rows packed; ReadPSDChannelPixels()
        unpacks the bits itself */
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType status;
  register unsigned char *p;
  size_t count, length, packet_size, row_size;
  ssize_t y;
  unsigned char *compact_pixels, *pixels;
  z_stream stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  /* guard added: a short read would otherwise feed undefined bytes to zlib */
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return(MagickFalse);
    }
  stream.next_in=(Bytef *) compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *) pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
      }
      (void) inflateEnd(&stream); /* release zlib state (was leaked before) */
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the horizontal delta prediction applied before compression.
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo *layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image *channel_image, *mask;
  MagickOffsetType offset;
  MagickBooleanType status;

  channel_image=image;
  mask=(Image *) NULL;
  if (layer_info->channel_info[channel].type < -1)
    {
      const char *option;

      /*
        Ignore a mask that is not a user-supplied layer mask, a mask that is
        disabled, or a mask whose flags have unsupported values.
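        The disabled flag is bit 0x02; such a mask is skipped unless the
        psd:preserve-opacity-mask option asks for it to be kept.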
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); SetImageType(mask,GrayscaleType,exception); channel_image=mask; } offset=TellBlob(image); status=MagickTrue; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } layer_info->mask.image=mask; return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; if (psd_info->mode == CMYKMode) SetImageColorspace(layer_info->image,CMYKColorspace,exception); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) SetImageColorspace(layer_info->image,GRAYColorspace,exception); /* Set up some hidden attributes for folks that need them. 
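    The layer offset and opacity are exposed as the psd:layer.x, psd:layer.y,
    and psd:layer.opacity artifacts; the layer name becomes the label
    property.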
*/ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j, compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); status=MagickFalse; if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0)) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
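          A negative layer count is the marker for this convention, so record
          the alpha trait and continue with the absolute value.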
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) ResetMagickMemory(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } count=ReadBlob(image,4,(unsigned char *) type); ReversePSDString(image,type,4); if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? 
"true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); /* We read it, but don't use it... */ for (j=0; j < (ssize_t) length; j+=8) { size_t blend_source=ReadBlobLong(image); size_t blend_dest=ReadBlobLong(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " source(%x), dest(%x)",(unsigned int) blend_source,(unsigned int) blend_dest); } } /* Layer name. */ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,i,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,i,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image 
*ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; ssize_t count; unsigned char *data; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if (psd_info.mode == LabMode) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { SetImageColorspace(image,CMYKColorspace,exception); if (psd_info.channels > 4) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536, exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); SetImageColorspace(image,GRAYColorspace,exception); if (psd_info.channels > 1) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); } else if (psd_info.channels > 3) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); /* Read PSD raster colormap only present for indexed and duotone images. 
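    The colormap is stored planar: all red samples first, then all green,
    then all blue. The duotone variant is undocumented, so its data is read
    and discarded.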
*/ length=ReadBlobMSBLong(image); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if (psd_info.mode == DuotoneMode) { /* Duotone image data; the format of this data is undocumented. */ data=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*data)); if (data == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); (void) ReadBlob(image,(size_t) length,data); data=(unsigned char *) RelinquishMagickMemory(data); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image, exception); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
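    When the resource blocks report that no merged image was stored, the
    layers are re-read instead and flattened into a composite below.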
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1)) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) && (length != 0)) { SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception); if (status != MagickTrue) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (GetImageListLength(image) == 1) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); SetImageAlphaChannel(image,TransparentAlphaChannel,exception); image->background_color.alpha=TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. 
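%  Both the PSD (version 1) and PSB (version 2, large document) variants are
%  written; version 2 widens the RLE row offsets from 2 to 4 bytes and the
%  section sizes from 4 to 8 bytes.
%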
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  /* version 2 offsets are 4 bytes wide; the previous unsigned short cast
     truncated offsets above 65535 */
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType current_offset;
  ssize_t result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size); /* was truncated to
      unsigned short */
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType current_offset;
  ssize_t result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
    result=WriteBlobMSBLongLong(image,size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int count;
  register ssize_t i, j;
  register unsigned char *q;
  unsigned char *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
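          The tag byte holds count-1 and is followed by count literal bytes;
          like packed runs, literal payloads are capped at 127 bytes.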
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const ssize_t channels) { size_t length; ssize_t i, y; if (next_image->compression == RLECompression) { length=WriteBlobMSBShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) length=WriteBlobMSBShort(image,ZipWithoutPrediction); #endif else length=WriteBlobMSBShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { int y; MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } ResetMagickMemory(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (next_image->compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) CHUNK; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) CHUNK-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; if (next_image->compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsImageGray(next_image) == MagickFalse) channels=next_image->colorspace == CMYKColorspace ? 
4 : 3; if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=WriteBlobMSBSignedShort(image,channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12))) { (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define 
PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(*p++); key[1]=(*p++); key[2]=(*p++); key[3]=(*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *icc_profile, *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; PSDInfo psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 
2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ if (SetImageGray(image,exception) != MagickFalse) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
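    Each 8BIM resource is a 4-byte signature, a 2-byte id, a 2-byte (empty)
    name, a 4-byte size, and even-padded data; the fixed 28 bytes cover the
    0x03ED resolution block written below.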
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((MagickOffsetType) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); SetPSDSize(&psd_info,image,0); SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=strlen(property) == 9 ? 255 : 0; } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1U; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=next_image->colorspace == CMYKColorspace ? 
4U : 3U; total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image->compose)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ? 1 << 0x02 : 1); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,mask->page.y); size+=WriteBlobMSBSignedLong(image,mask->page.x); size+=WriteBlobMSBLong(image,(const unsigned int) mask->rows+ mask->page.y); size+=WriteBlobMSBLong(image,(const unsigned int) mask->columns+ mask->page.x); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 
8 : 16),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
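/*
  Note on the size fields above: the layer-and-mask section length is not
  known until every layer record and channel has been written, so the writer
  remembers blob offsets (size_offset, layer_size_offsets[]) and patches the
  placeholders afterwards with WritePSDSize(), which seeks back, writes the
  final value at the version-dependent width, and restores the current
  position.  A minimal sketch of the pattern (illustrative variable names
  only):

    MagickOffsetType placeholder = TellBlob(image);  // remember the spot
    (void) SetPSDSize(&psd_info, image, 0);          // reserve the field
    // ... write the variable-length payload, accumulating payload_size ...
    (void) WritePSDSize(&psd_info, image, payload_size, placeholder);
*/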
/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
#define MaxPSDChannels  56
#define PSDQuantum(x)  (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
typedef struct _ChannelInfo
{
  short int
    type;

  size_t
    size;
} ChannelInfo;

typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[256],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  const char
    *blend_mode;

  switch (op)
  {
    case ColorBurnCompositeOp: blend_mode="idiv"; break;
    case ColorDodgeCompositeOp: blend_mode="div "; break;
    case ColorizeCompositeOp: blend_mode="colr"; break;
    case DarkenCompositeOp: blend_mode="dark"; break;
    case DifferenceCompositeOp: blend_mode="diff"; break;
    case DissolveCompositeOp: blend_mode="diss"; break;
    case ExclusionCompositeOp: blend_mode="smud"; break;
    case HardLightCompositeOp: blend_mode="hLit"; break;
    case HardMixCompositeOp: blend_mode="hMix"; break;
    case HueCompositeOp: blend_mode="hue "; break;
    case LightenCompositeOp: blend_mode="lite"; break;
    case LinearBurnCompositeOp: blend_mode="lbrn"; break;
    case LinearDodgeCompositeOp: blend_mode="lddg"; break;
    case LinearLightCompositeOp: blend_mode="lLit"; break;
    case LuminizeCompositeOp: blend_mode="lum "; break;
    case MultiplyCompositeOp: blend_mode="mul "; break;
    case OverCompositeOp: blend_mode="norm"; break;
    case OverlayCompositeOp: blend_mode="over"; break;
    case PinLightCompositeOp: blend_mode="pLit"; break;
    case SaturateCompositeOp: blend_mode="sat "; break;
    case ScreenCompositeOp: blend_mode="scrn"; break;
    case SoftLightCompositeOp: blend_mode="sLit"; break;
    case VividLightCompositeOp: blend_mode="vLit"; break;
    default: blend_mode="norm";
  }
  return(blend_mode);
}

/*
  For some reason Photoshop seems to blend semi-transparent pixels with
  white.  This method reverts the blending.  This can be disabled by
  setting the option 'psd:alpha-unblend' to off.
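  In normalized terms the stored value appears to be
  observed = gamma*original + (1-gamma)*white, where gamma is the normalized
  alpha; the loop below inverts this as
  original = (observed - (1-gamma))/gamma, leaving fully transparent
  (gamma == 0) and fully opaque (gamma == 1) pixels untouched.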
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return (MagickTrue); option = GetImageOption(image_info, "psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return (MagickTrue); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma = QuantumScale * GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); if (channel != AlphaPixelChannel) q[i] = ClampToQuantum((q[i] - ((1.0 - gamma) * QuantumRange)) / gamma); } } q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image * image, Quantum opacity, MagickBooleanType revert, ExceptionInfo * exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying layer opacity %.20g", (double)opacity); if (opacity == OpaqueAlpha) return (MagickTrue); image->alpha_trait = BlendPixelTrait; status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image, (Quantum) (QuantumScale * (GetPixelAlpha(image, q)) * opacity), q); else if (opacity > 0) SetPixelAlpha(image, (Quantum) (QuantumRange * (GetPixelAlpha(image, q) / (MagickRealType) opacity)), q); q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static MagickBooleanType ApplyPSDOpacityMask(Image * image, const Image * mask, Quantum background, MagickBooleanType revert, ExceptionInfo * exception) { Image * complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying opacity mask"); complete_mask = CloneImage(image, image->columns, image->rows, MagickTrue, exception); complete_mask->alpha_trait = BlendPixelTrait; GetPixelInfo(complete_mask, &color); color.red = background; SetImageColor(complete_mask, &color, exception); status = CompositeImage(complete_mask, mask, OverCompositeOp, MagickTrue, mask->page.x - image->page.x, mask->page.y - image->page.y, exception); if (status == MagickFalse) { complete_mask = DestroyImage(complete_mask); return (status); } image->alpha_trait = BlendPixelTrait; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register Quantum * p; 
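/*
 * complete_mask above is a clone of the image filled with the mask
 * background color, with the mask composited onto it; this loop scales
 * each pixel's alpha by the normalized mask intensity (or divides it back
 * out when revert is MagickTrue).
 */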
register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); p = GetAuthenticPixels(complete_mask, 0, y, complete_mask->columns, 1, exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha = GetPixelAlpha(image, q); intensity = GetPixelIntensity(complete_mask, p); if (revert == MagickFalse) SetPixelAlpha(image, ClampToQuantum(intensity * (QuantumScale * alpha)), q); else if (intensity > 0) SetPixelAlpha(image, ClampToQuantum((alpha / intensity) * QuantumRange), q); q += GetPixelChannels(image); p += GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } complete_mask = DestroyImage(complete_mask); return (status); } static void PreservePSDOpacityMask(Image * image, LayerInfo * layer_info, ExceptionInfo * exception) { char *key; RandomInfo * random_info; StringInfo * key_info; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " preserving opacity mask"); random_info = AcquireRandomInfo(); key_info = GetRandomKey(random_info, 2 + 1); key = (char *)GetStringInfoDatum(key_info); key[8] = layer_info->mask.background; key[9] = '\0'; layer_info->mask.image->page.x += layer_info->page.x; layer_info->mask.image->page.y += layer_info->page.y; (void)SetImageRegistry(ImageRegistryType, (const char *)key, layer_info->mask.image, exception); (void)SetImageArtifact(layer_info->image, "psd:opacity-mask", (const char *)key); key_info = DestroyStringInfo(key_info); random_info = DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels, const ssize_t depth, const size_t number_pixels, unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets = (ssize_t) number_compact_pixels; for (i = 0; (packets > 1) && (i < (ssize_t) number_pixels);) { packets--; length = (size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length = 256 - length + 1; CheckNumberCompactPixels; pixel = (*compact_pixels++); for (j = 0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (pixel >> 7) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 6) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 5) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 4) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 3) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 2) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 1) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (unsigned char)((pixel >> 6) & 0x03); *pixels++ = (unsigned char)((pixel >> 4) & 0x03); *pixels++ = (unsigned char)((pixel >> 2) & 0x03); *pixels++ = (unsigned char)((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++ = (unsigned char)((pixel >> 4) & 0xff); *pixels++ = (unsigned char)((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++ = (unsigned char)pixel; break; } } } continue; } length++; for (j = 0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (*compact_pixels >> 7) & 0x01 ? 
0U : 255U; *pixels++ = (*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (*compact_pixels >> 6) & 0x03; *pixels++ = (*compact_pixels >> 4) & 0x03; *pixels++ = (*compact_pixels >> 2) & 0x03; *pixels++ = (*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++ = (*compact_pixels >> 4) & 0xff; *pixels++ = (*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++ = (*compact_pixels); break; } } compact_pixels++; } } return (i); } static inline LayerInfo * DestroyLayerInfo(LayerInfo * layer_info, const ssize_t number_layers) { ssize_t i; for (i = 0; i < number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image = DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image = DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(Image * image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return (2); else if (image->depth > 8) return (2); } else if (image->depth > 8) return (2); return (1); } static inline MagickSizeType GetPSDSize(const PSDInfo * psd_info, Image * image) { if (psd_info->version == 1) return ((MagickSizeType) ReadBlobLong(image)); return ((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image * image) { if (image->depth == 1) return (((image->columns + 7) / 8) * GetPSDPacketSize(image)); else return (image->columns * GetPSDPacketSize(image)); } static const char * ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image * image, ExceptionInfo * exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask = SetImageChannelMask(image, (ChannelType) (AllChannels & ~ AlphaChannel)); status = NegateImage(image, MagickFalse, exception); (void)SetImageChannelMask(image, channel_mask); return (status); } static void ParseImageResourceBlocks(Image * image, const unsigned char *blocks, size_t length, MagickBooleanType * has_merged_image, ExceptionInfo * exception) { const unsigned char *p; StringInfo * profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return; profile = BlobToStringInfo((const unsigned char *)NULL, length); SetStringInfoDatum(profile, blocks); (void)SetImageProfile(image, "8bim", profile, exception); profile = DestroyStringInfo(profile); for (p = blocks; (p >= blocks) && (p < (blocks + length - 16));) { if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); if ((p + 
count) > (blocks + length - 16)) return; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* * Resolution info. */ p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.x = (double)resolution; (void)FormatLocaleString(value, MagickPathExtent, "%g", image->resolution.x); (void)SetImageProperty(image, "tiff:XResolution", value, exception); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.y = (double)resolution; (void)FormatLocaleString(value, MagickPathExtent, "%g", image->resolution.y); (void)SetImageProperty(image, "tiff:YResolution", value, exception); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); image->units = PixelsPerInchResolution; break; } case 0x0421: { if (*(p + 4) == 0) *has_merged_image = MagickFalse; p += count; break; } default: { p += count; break; } } if ((count & 0x01) != 0) p++; } return; } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *)NULL) return (OverCompositeOp); if (LocaleNCompare(mode, "norm", 4) == 0) return (OverCompositeOp); if (LocaleNCompare(mode, "mul ", 4) == 0) return (MultiplyCompositeOp); if (LocaleNCompare(mode, "diss", 4) == 0) return (DissolveCompositeOp); if (LocaleNCompare(mode, "diff", 4) == 0) return (DifferenceCompositeOp); if (LocaleNCompare(mode, "dark", 4) == 0) return (DarkenCompositeOp); if (LocaleNCompare(mode, "lite", 4) == 0) return (LightenCompositeOp); if (LocaleNCompare(mode, "hue ", 4) == 0) return (HueCompositeOp); if (LocaleNCompare(mode, "sat ", 4) == 0) return (SaturateCompositeOp); if (LocaleNCompare(mode, "colr", 4) == 0) return (ColorizeCompositeOp); if (LocaleNCompare(mode, "lum ", 4) == 0) return (LuminizeCompositeOp); if (LocaleNCompare(mode, "scrn", 4) == 0) return (ScreenCompositeOp); if (LocaleNCompare(mode, "over", 4) == 0) return (OverlayCompositeOp); if (LocaleNCompare(mode, "hLit", 4) == 0) return (HardLightCompositeOp); if (LocaleNCompare(mode, "sLit", 4) == 0) return (SoftLightCompositeOp); if (LocaleNCompare(mode, "smud", 4) == 0) return (ExclusionCompositeOp); if (LocaleNCompare(mode, "div ", 4) == 0) return (ColorDodgeCompositeOp); if (LocaleNCompare(mode, "idiv", 4) == 0) return (ColorBurnCompositeOp); if (LocaleNCompare(mode, "lbrn", 4) == 0) return (LinearBurnCompositeOp); if (LocaleNCompare(mode, "lddg", 4) == 0) return (LinearDodgeCompositeOp); if (LocaleNCompare(mode, "lLit", 4) == 0) return (LinearLightCompositeOp); if (LocaleNCompare(mode, "vLit", 4) == 0) return (VividLightCompositeOp); if (LocaleNCompare(mode, "pLit", 4) == 0) return (PinLightCompositeOp); if (LocaleNCompare(mode, "hMix", 4) == 0) return (HardMixCompositeOp); return (OverCompositeOp); } static inline void ReversePSDString(Image * image, char *p, size_t length) { char *q; if (image->endian == MSBEndian) return; q = p + length; for (--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image * image, const size_t channels, const ssize_t type, const size_t packet_size, const Quantum pixel, Quantum * q, ExceptionInfo * exception) { if (image->storage_class == PseudoClass) { if (packet_size == 1) SetPixelIndex(image, ScaleQuantumToChar(pixel), q); else SetPixelIndex(image, ScaleQuantumToShort(pixel), q); SetPixelViaPixelInfo(image, image->colormap 
+ (ssize_t) ConstrainColormapIndex(image, GetPixelIndex(image, q), exception), q); return; } switch (type) { case -1: { SetPixelAlpha(image, pixel, q); break; } case -2: case 0: { SetPixelRed(image, pixel, q); if (channels == 1 || type == -2) SetPixelGray(image, pixel, q); break; } case 1: { if (image->storage_class == PseudoClass) SetPixelAlpha(image, pixel, q); else SetPixelGreen(image, pixel, q); break; } case 2: { if (image->storage_class == PseudoClass) SetPixelAlpha(image, pixel, q); else SetPixelBlue(image, pixel, q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image, pixel, q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image * image, const size_t channels, const size_t row, const ssize_t type, const unsigned char *pixels, ExceptionInfo * exception) { Quantum pixel; register const unsigned char *p; register Quantum * q; register ssize_t x; size_t packet_size; unsigned short nibble; p = pixels; q = GetAuthenticPixels(image, 0, row, image->columns, 1, exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size = GetPSDPacketSize(image); for (x = 0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel = ScaleCharToQuantum(*p++); else { p = PushShortPixel(MSBEndian, p, &nibble); pixel = ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image, channels, type, packet_size, pixel, q, exception); q += GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits = image->columns - x; if (number_bits > 8) number_bits = 8; for (bit = 0; bit < number_bits; bit++) { SetPSDPixel(image, channels, type, packet_size, (((unsigned char)pixel) & (0x01 << (7 - bit))) != 0 ? 
0 : QuantumRange, q, exception); q += GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return (SyncAuthenticPixels(image, exception)); } static MagickBooleanType ReadPSDChannelRaw(Image * image, const size_t channels, const ssize_t type, ExceptionInfo * exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RAW"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, row_size, pixels); if (count != row_size) break; status = ReadPSDChannelPixels(image, channels, y, type, pixels, exception); if (status == MagickFalse) break; } pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } static inline MagickOffsetType * ReadPSDRLESizes(Image * image, const PSDInfo * psd_info, const size_t size) { MagickOffsetType * sizes; ssize_t y; sizes = (MagickOffsetType *) AcquireQuantumMemory(size, sizeof(*sizes)); if (sizes != (MagickOffsetType *) NULL) { for (y = 0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y] = (MagickOffsetType) ReadBlobShort(image); else sizes[y] = (MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image * image, const PSDInfo * psd_info, const ssize_t type, MagickOffsetType * sizes, ExceptionInfo * exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RLE compressed"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); length = 0; for (y = 0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length = (size_t) sizes[y]; if (length > row_size + 256) //arbitrary number { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "InvalidLength", image->filename); } compact_pixels = (unsigned char *)AcquireQuantumMemory(length, sizeof(*pixels)); if (compact_pixels == (unsigned char *)NULL) { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)ResetMagickMemory(compact_pixels, 0, length * sizeof(*compact_pixels)); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, (size_t) sizes[y], compact_pixels); if (count != (ssize_t) sizes[y]) break; count = DecodePSDPixels((size_t) sizes[y], compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth), row_size, pixels); if (count != (ssize_t) row_size) break; status = ReadPSDChannelPixels(image, psd_info->channels, y, type, pixels, exception); if (status == MagickFalse) break; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image * image, const size_t channels, const ssize_t type, const PSDCompressionType compression, const size_t compact_size, ExceptionInfo * exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is ZIP compressed"); compact_pixels = (unsigned char *)AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); packet_size = GetPSDPacketSize(image); row_size = image->columns * packet_size; count = image->rows * row_size; pixels = (unsigned char *)AcquireQuantumMemory(count, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) { compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } ResetMagickMemory(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; (void)ReadBlob(image, compact_size, compact_pixels); stream.next_in = (Bytef *) compact_pixels; stream.avail_in = (uInt) compact_size; stream.next_out = (Bytef *) pixels; stream.avail_out = (uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret = inflate(&stream, Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (MagickFalse); } } } if (compression == ZipWithPrediction) { p = pixels; while (count > 0) { length = image->columns; while (--length) { if (packet_size == 2) { p[2] += p[0] + ((p[1] + p[3]) >> 8); p[3] += p[1]; } else *(p + 1) += *p; p += packet_size; } p += packet_size; count -= row_size; } } status = MagickTrue; p = pixels; for (y = 0; y < (ssize_t) image->rows; y++) { status = ReadPSDChannelPixels(image, channels, y, type, p, exception); if (status == MagickFalse) break; p += row_size; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #endif static MagickBooleanType ReadPSDChannel(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, const size_t channel, const PSDCompressionType compression, ExceptionInfo * exception) { Image * channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image = image; mask = (Image *) NULL; if (layer_info->channel_info[channel].type < -1) { const char *option; /* * Ignore mask that is not a user supplied layer mask, if the mask is * disabled or if the flags have unsupported values. 
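* Channel type ids follow the convention used throughout this coder:
* non-negative values are color channels, -1 is the transparency (alpha)
* channel, -2 is the user-supplied layer mask, and anything below -2 is
* skipped here.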
*/ option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image, layer_info->channel_info[channel].size - 2, SEEK_CUR); return (MagickTrue); } mask = CloneImage(image, layer_info->mask.page.width, layer_info->mask.page.height, MagickFalse, exception); SetImageType(mask, GrayscaleType, exception); channel_image = mask; } offset = TellBlob(image); status = MagickTrue; switch (compression) { case Raw: status = ReadPSDChannelRaw(channel_image, psd_info->channels, layer_info->channel_info[channel].type, exception); break; case RLE: { MagickOffsetType * sizes; sizes = ReadPSDRLESizes(channel_image, psd_info, channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); status = ReadPSDChannelRLE(channel_image, psd_info, layer_info->channel_info[channel].type, sizes, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status = ReadPSDChannelZip(channel_image, layer_info->channels, layer_info->channel_info[channel].type, compression, layer_info->channel_info[channel].size - 2, exception); #else (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)", image->filename); #endif break; default: (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); break; } SeekBlob(image, offset + layer_info->channel_info[channel].size - 2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError, "UnableToDecompressImage", image->filename); } layer_info->mask.image = mask; return (status); } static MagickBooleanType ReadPSDLayer(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, ExceptionInfo * exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void)SetImageBackgroundColor(layer_info->image, exception); layer_info->image->compose = PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose = NoCompositeOp; if (psd_info->mode == CMYKMode) SetImageColorspace(layer_info->image, CMYKColorspace, exception); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) SetImageColorspace(layer_info->image, GRAYColorspace, exception); /* * Set up some hidden attributes for folks that need them. 
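* The layer geometry and opacity are preserved below as the image artifacts
* "psd:layer.x", "psd:layer.y" and "psd:layer.opacity", and the layer name
* is exposed as the standard "label" property.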
*/ (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.x); (void)SetImageArtifact(layer_info->image, "psd:layer.x", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.y); (void)SetImageArtifact(layer_info->image, "psd:layer.y", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double) layer_info->opacity); (void)SetImageArtifact(layer_info->image, "psd:layer.opacity", message); (void)SetImageProperty(layer_info->image, "label", (char *)layer_info->name, exception); status = MagickTrue; for (j = 0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for channel %.20g", (double)j); compression = (PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression = ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait = BlendPixelTrait; status = ReadPSDChannel(layer_info->image, image_info, psd_info, layer_info, j, compression, exception); if (status == MagickFalse) break; } if (status != MagickFalse) status = ApplyPSDLayerOpacity(layer_info->image, layer_info->opacity, MagickFalse, exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status = NegateCMYK(layer_info->image, exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x = layer_info->mask.page.x; layer_info->mask.image->page.y = layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose = NoCompositeOp; else status = ApplyPSDOpacityMask(layer_info->image, layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange, MagickFalse, exception); option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image, layer_info, exception); layer_info->mask.image = DestroyImage(layer_info->mask.image); } return (status); } ModuleExport MagickBooleanType ReadPSDLayers(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, const MagickBooleanType skip_layers, ExceptionInfo * exception) { char type[4]; LayerInfo * layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size = GetPSDSize(psd_info, image); if (size == 0) { /* * Skip layers & masks. */ (void)ReadBlobLong(image); count = ReadBlob(image, 4, (unsigned char *)type); ReversePSDString(image, type, 4); status = MagickFalse; if ((count == 0) || (LocaleNCompare(type, "8BIM", 4) != 0)) return (MagickTrue); else { count = ReadBlob(image, 4, (unsigned char *)type); ReversePSDString(image, type, 4); if ((count != 0) && (LocaleNCompare(type, "Lr16", 4) == 0)) size = GetPSDSize(psd_info, image); else return (MagickTrue); } } status = MagickTrue; if (size != 0) { layer_info = (LayerInfo *) NULL; number_layers = (short)ReadBlobShort(image); if (number_layers < 0) { /* * The first alpha channel in the merged result contains the * transparency data for the merged result. 
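* A stored count of, say, -3 therefore means three layers whose merged
* result carries real transparency; the reader takes the absolute value
* and enables the alpha trait accordingly.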
*/ number_layers = MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " negative layer count corrected for"); image->alpha_trait = BlendPixelTrait; } /* * We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return (MagickTrue); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image contains %.20g layers", (double)number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError, "InvalidNumberOfLayers", image->filename); layer_info = (LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)ResetMagickMemory(layer_info, 0, (size_t) number_layers * sizeof(*layer_info)); for (i = 0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading layer #%.20g", (double)i + 1); layer_info[i].page.y = ReadBlobSignedLong(image); layer_info[i].page.x = ReadBlobSignedLong(image); y = ReadBlobSignedLong(image); x = ReadBlobSignedLong(image); layer_info[i].page.width = (size_t) (x - layer_info[i].page.x); layer_info[i].page.height = (size_t) (y - layer_info[i].page.y); layer_info[i].channels = ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double)layer_info[i].page.x, (double)layer_info[i].page.y, (double)layer_info[i].page.height, (double) layer_info[i].page.width, (double)layer_info[i].channels); for (j = 0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type = (short)ReadBlobShort(image); layer_info[i].channel_info[j].size = (size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g", (double)j, (double)layer_info[i].channel_info[j].type, (double)layer_info[i].channel_info[j].size); } count = ReadBlob(image, 4, (unsigned char *)type); ReversePSDString(image, type, 4); if ((count == 0) || (LocaleNCompare(type, "8BIM", 4) != 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } count = ReadBlob(image, 4, (unsigned char *)layer_info[i].blendkey); ReversePSDString(image, layer_info[i].blendkey, 4); layer_info[i].opacity = (Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping = (unsigned char)ReadBlobByte(image); layer_info[i].flags = (unsigned char)ReadBlobByte(image); layer_info[i].visible = !(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey, (double)layer_info[i].opacity, layer_info[i].clipping ? "true" : "false", layer_info[i].flags, layer_info[i].visible ? 
"true" : "false"); (void)ReadBlobByte(image); /* filler */ size = ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer contains additional info"); length = ReadBlobLong(image); combined_length = length + 4; if (length != 0) { /* * Layer mask info. */ layer_info[i].mask.page.y = ReadBlobSignedLong(image); layer_info[i].mask.page.x = ReadBlobSignedLong(image); layer_info[i].mask.page.height = (size_t) (ReadBlobLong(image) - layer_info[i].mask.page.y); layer_info[i].mask.page.width = (size_t) (ReadBlobLong(image) - layer_info[i].mask.page.x); layer_info[i].mask.background = (unsigned char)ReadBlobByte( image); layer_info[i].mask.flags = (unsigned char)ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y = layer_info[i].mask.page.y - layer_info[i].page.y; layer_info[i].mask.page.x = layer_info[i].mask.page.x - layer_info[i].page.x; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double)layer_info[i].mask.page.x, (double) layer_info[i].mask.page.y, (double)layer_info[i].mask.page.width, (double)layer_info[i].mask.page.height, (double) ((MagickOffsetType) length) - 18); /* * Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image, (MagickSizeType) (length - 18)) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = ReadBlobLong(image); combined_length += length + 4; if (length != 0) { /* * Layer blending ranges info. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer blending ranges: length=%.20g", (double) ((MagickOffsetType) length)); /* * We read it, but don't use it... */ for (j = 0; j < (ssize_t) length; j += 8) { size_t blend_source = ReadBlobLong(image); size_t blend_dest = ReadBlobLong(image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " source(%x), dest(%x)", (unsigned int) blend_source, (unsigned int)blend_dest); } } /* * Layer name. */ length = (MagickSizeType) (unsigned char)ReadBlobByte(image); combined_length += length + 1; if (length > 0) (void)ReadBlob(image, (size_t) length++, layer_info[i].name); layer_info[i].name[length] = '\0'; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer name: %s", layer_info[i].name); if ((length % 4) != 0) { length = 4 - (length % 4); combined_length += length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image, length) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = (MagickSizeType) size - combined_length; if (length > 0) { unsigned char *info; layer_info[i].info = AcquireStringInfo((const size_t)length); info = GetStringInfoDatum(layer_info[i].info); (void)ReadBlob(image, (const size_t)length, info); } } } for (i = 0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); continue; } /* * Allocate layered image. 
*/ layer_info[i].image = CloneImage(image, layer_info[i].page.width, layer_info[i].page.height, MagickFalse, exception); if (layer_info[i].image == (Image *) NULL) { layer_info = DestroyLayerInfo(layer_info, number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of image for layer %.20g failed", (double)i); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void)SetImageProfile(layer_info[i].image, "psd:additional-info", layer_info[i].info, exception); layer_info[i].info = DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i = 0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j = 0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image, (MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } continue; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for layer %.20g", (double)i); status = ReadPSDLayer(image, image_info, psd_info, &layer_info[i], exception); if (status == MagickFalse) break; status = SetImageProgress(image, LoadImagesTag, i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i = 0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j = i; j < number_layers - 1; j++) layer_info[j] = layer_info[j + 1]; number_layers--; i--; } } if (number_layers > 0) { for (i = 0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous = layer_info[i - 1].image; if (i < (number_layers - 1)) layer_info[i].image->next = layer_info[i + 1].image; layer_info[i].image->page = layer_info[i].page; } image->next = layer_info[0].image; layer_info[0].image->previous = image; } layer_info = (LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info = DestroyLayerInfo(layer_info, number_layers); } return (status); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo * image_info, Image * image, const PSDInfo * psd_info, ExceptionInfo * exception) { MagickOffsetType * sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression = (PSDCompressionType) ReadBlobMSBShort(image); image->compression = ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); return (MagickFalse); } sizes = (MagickOffsetType *) NULL; if (compression == RLE) { sizes = ReadPSDRLESizes(image, psd_info, image->rows * psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } status = MagickTrue; for (i = 0; i < (ssize_t) psd_info->channels; i++) { if (compression == RLE) status = ReadPSDChannelRLE(image, psd_info, i, sizes + (i * image->rows), exception); else status = ReadPSDChannelRaw(image, psd_info->channels, i, exception); if (status != MagickFalse) status = SetImageProgress(image, LoadImagesTag, i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status = NegateCMYK(image, exception); if (status != MagickFalse) status = 
CorrectPSDAlphaBlend(image_info, image, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); return (status); } static Image * ReadPSDImage(const ImageInfo * image_info, ExceptionInfo * exception) { Image * image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; ssize_t count; unsigned char *data; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image = AcquireImage(image_info, exception); status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image = DestroyImageList(image); return ((Image *) NULL); } /* * Read image header. */ image->endian = MSBEndian; count = ReadBlob(image, 4, (unsigned char *)psd_info.signature); psd_info.version = ReadBlobMSBShort(image); if ((count == 0) || (LocaleNCompare(psd_info.signature, "8BPS", 4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); (void)ReadBlob(image, 6, psd_info.reserved); psd_info.channels = ReadBlobMSBShort(image); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError, "MaximumChannelsExceeded"); psd_info.rows = ReadBlobMSBLong(image); psd_info.columns = ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.depth = ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.mode = ReadBlobMSBShort(image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double)psd_info.columns, (double)psd_info.rows, (double) psd_info.channels, (double)psd_info.depth, ModeToString((PSDImageType) psd_info.mode)); /* * Initialize image. */ image->depth = psd_info.depth; image->columns = psd_info.columns; image->rows = psd_info.rows; status = SetImageExtent(image, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImageList(image)); if (SetImageBackgroundColor(image, exception) == MagickFalse) { image = DestroyImageList(image); return ((Image *) NULL); } if (psd_info.mode == LabMode) SetImageColorspace(image, LabColorspace, exception); if (psd_info.mode == CMYKMode) { SetImageColorspace(image, CMYKColorspace, exception); if (psd_info.channels > 4) SetImageAlphaChannel(image, ActivateAlphaChannel, exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { status = AcquireImageColormap(image, psd_info.depth != 16 ? 
256 : 65536, exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image colormap allocated"); SetImageColorspace(image, GRAYColorspace, exception); if (psd_info.channels > 1) SetImageAlphaChannel(image, ActivateAlphaChannel, exception); } else if (psd_info.channels > 3) SetImageAlphaChannel(image, ActivateAlphaChannel, exception); /* * Read PSD raster colormap only present for indexed and duotone images. */ length = ReadBlobMSBLong(image); if (length != 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading colormap"); if (psd_info.mode == DuotoneMode) { /* * Duotone image data; the format of this data is undocumented. */ data = (unsigned char *)AcquireQuantumMemory((size_t) length, sizeof(*data)); if (data == (unsigned char *)NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); (void)ReadBlob(image, (size_t) length, data); data = (unsigned char *)RelinquishMagickMemory(data); } else { size_t number_colors; /* * Read PSD raster colormap. */ number_colors = length / 3; if (number_colors > 65536) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (AcquireImageColormap(image, number_colors, exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].red = ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].green = ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].blue = ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->alpha_trait = UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image = MagickTrue; length = ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* * Image resources block. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading image resource blocks - %.20g bytes", (double) ((MagickOffsetType) length)); blocks = (unsigned char *)AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *)NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); count = ReadBlob(image, (size_t) length, blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *)blocks, "8BIM", 4) != 0)) { blocks = (unsigned char *)RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } ParseImageResourceBlocks(image, blocks, (size_t) length, &has_merged_image, exception); blocks = (unsigned char *)RelinquishMagickMemory(blocks); } /* * Layer and mask block. 
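* The section starts with its total length: 4 bytes in version 1 files and
* 8 bytes in PSB (version 2) files, hence GetPSDSize(); a value of 8 is
* handled below by re-reading two 32-bit lengths and keeping the second.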
*/ length = GetPSDSize(&psd_info, image); if (length == 8) { length = ReadBlobMSBLong(image); length = ReadBlobMSBLong(image); } offset = TellBlob(image); skip_layers = MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " read composite only"); skip_layers = MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayers(image, image_info, &psd_info, skip_layers, exception) != MagickTrue) { (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } /* * Skip the rest of the layer and mask information. */ SeekBlob(image, offset + length, SEEK_SET); } /* * If we are only "pinging" the image, then we're done - so return. */ if (image_info->ping != MagickFalse) { (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading the precombined layer"); if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1)) has_merged_image = (MagickBooleanType) ReadPSDMergedImage(image_info, image, &psd_info, exception); if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) && (length != 0)) { SeekBlob(image, offset, SEEK_SET); status = ReadPSDLayers(image, image_info, &psd_info, MagickFalse, exception); if (status != MagickTrue) { (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image * merged; if (GetImageListLength(image) == 1) ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); SetImageAlphaChannel(image, TransparentAlphaChannel, exception); image->background_color.alpha = TransparentAlpha; image->background_color.alpha_trait = BlendPixelTrait; merged = MergeImageLayers(image, FlattenLayer, exception); ReplaceImageInList(&image, merged); } (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R e g i s t e r P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RegisterPSDImage() adds properties for the PSD image format to % * the list of supported formats. The properties include the image format % * tag, a method to read and/or write the format, whether the format % * supports the saving of more than one frame to the same file or blob, % * whether the format supports native in-memory I/O, and a brief % * description of the format. 
% % The format of the RegisterPSDImage method * is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo * entry; entry = AcquireMagickInfo("PSD", "PSB", "Adobe Large Document Format"); entry->decoder = (DecodeImageHandler *) ReadPSDImage; entry->encoder = (EncodeImageHandler *) WritePSDImage; entry->magick = (IsImageFormatHandler *) IsPSD; entry->flags |= CoderDecoderSeekableStreamFlag; entry->flags |= CoderEncoderSeekableStreamFlag; (void)RegisterMagickInfo(entry); entry = AcquireMagickInfo("PSD", "PSD", "Adobe Photoshop bitmap"); entry->decoder = (DecodeImageHandler *) ReadPSDImage; entry->encoder = (EncodeImageHandler *) WritePSDImage; entry->magick = (IsImageFormatHandler *) IsPSD; entry->flags |= CoderDecoderSeekableStreamFlag; entry->flags |= CoderEncoderSeekableStreamFlag; (void)RegisterMagickInfo(entry); return (MagickImageCoderSignature); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % U n r e g i s t e r P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % UnregisterPSDImage() removes format registrations made by the % * PSD module from the list of supported formats. % % The format of the * UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void)UnregisterMagickInfo("PSB"); (void)UnregisterMagickInfo("PSD"); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % W r i t e P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded * image format. % % The format of the WritePSDImage method is: % % * MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, * % ExceptionInfo *exception) % % A description of each parameter * follows. % % o image_info: the image info. % % o image: The image. * % % o exception: return any errors or warnings in this structure. 
%
*/

static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  /* Version 2 (PSB) offsets are 32 bits wide; casting to unsigned short
     here would truncate them. */
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
    result=WriteBlobMSBLongLong(image,size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
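          A literal run stores count-1 in the control byte followed by the
          raw bytes themselves; like packed runs it is capped at 127 so the
          control value fits the signed-byte encoding.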
*/ count = 0; while ((*(pixels + count) != *(pixels + count + 1)) || (*(pixels + count + 1) != *(pixels + count + 2))) { packbits[count + 1] = pixels[count]; count++; if (((ssize_t) count >= (i - 3)) || (count >= 127)) break; } i -= count; *packbits = (unsigned char)(count - 1); for (j = 0; j <= (ssize_t) count; j++) *q++ = packbits[j]; pixels += count; break; } } } *q++ = (unsigned char)128; /* EOD marker */ packbits = (unsigned char *)RelinquishMagickMemory(packbits); return ((size_t) (q - compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo * psd_info, Image * image, const Image * next_image, const ssize_t channels) { size_t length; ssize_t i, y; if (next_image->compression == RLECompression) { length = WriteBlobMSBShort(image, RLE); for (i = 0; i < channels; i++) for (y = 0; y < (ssize_t) next_image->rows; y++) length += SetPSDOffset(psd_info, image, 0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) length = WriteBlobMSBShort(image, ZipWithoutPrediction); #endif else length = WriteBlobMSBShort(image, Raw); return (length); } static size_t WritePSDChannel(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset, const MagickBooleanType separate, ExceptionInfo * exception) { int y; MagickBooleanType monochrome; QuantumInfo * quantum_info; register const Quantum * p; register ssize_t i; size_t count, length; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels = (unsigned char *)NULL; flush = Z_NO_FLUSH; #endif count = 0; if (separate != MagickFalse) { size_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, 1); } if (next_image->depth > 8) next_image->depth = 16; monochrome = IsImageMonochrome(image) && (image->depth == 1) ? 
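/*
  A minimal sketch (not part of the coder) of the PackBits stream that
  PSDPackbitsEncodeImage() above emits, written as the matching decoder;
  names and buffer handling are hypothetical.  Control byte n in 0..127
  means "copy the next n+1 literal bytes", n in 129..255 means "repeat the
  next byte 257-n times", and 128 is skipped (the encoder uses it as an
  end-of-data marker).

    static size_t UnpackbitsSketch(const unsigned char *src,size_t src_len,
      unsigned char *dst,size_t dst_len)
    {
      size_t i=0, o=0;
      while (i < src_len)
      {
        unsigned char n=src[i++];
        if (n < 128)
          {
            size_t run=(size_t) n+1;      // literal run
            while ((run-- != 0) && (i < src_len) && (o < dst_len))
              dst[o++]=src[i++];
          }
        else
          if (n > 128)
            {
              size_t run=257-(size_t) n;  // packed (replicate) run
              if (i < src_len)
                {
                  unsigned char b=src[i++];
                  while ((run-- != 0) && (o < dst_len))
                    dst[o++]=b;
                }
            }
      }
      return(o);
    }
*/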
MagickTrue : MagickFalse; quantum_info = AcquireQuantumInfo(image_info, next_image); if (quantum_info == (QuantumInfo *) NULL) return (0); pixels = (unsigned char *)GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels = (unsigned char *)AcquireQuantumMemory(CHUNK, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *)NULL) { quantum_info = DestroyQuantumInfo(quantum_info); return (0); } ResetMagickMemory(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; level = Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level = (int)image_info->quality; if (deflateInit(&stream, level) != Z_OK) { quantum_info = DestroyQuantumInfo(quantum_info); return (0); } } #endif for (y = 0; y < (ssize_t) next_image->rows; y++) { p = GetVirtualPixels(next_image, 0, y, next_image->columns, 1, exception); if (p == (const Quantum *)NULL) break; length = ExportQuantumPixels(next_image, (CacheView *) NULL, quantum_info, quantum_type, pixels, exception); if (monochrome != MagickFalse) for (i = 0; i < (ssize_t) length; i++) pixels[i] = (~pixels[i]); if (next_image->compression == RLECompression) { length = PSDPackbitsEncodeImage(image, length, pixels, compact_pixels, exception); count += WriteBlob(image, length, compact_pixels); size_offset += WritePSDOffset(psd_info, image, length, size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in = (uInt) length; stream.next_in = (Bytef *) pixels; if (y == (ssize_t) next_image->rows - 1) flush = Z_FINISH; do { stream.avail_out = (uInt) CHUNK; stream.next_out = (Bytef *) compressed_pixels; if (deflate(&stream, flush) == Z_STREAM_ERROR) break; length = (size_t) CHUNK - stream.avail_out; if (length > 0) count += WriteBlob(image, length, compressed_pixels); } while (stream.avail_out == 0); } #endif else count += WriteBlob(image, length, pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { (void)deflateEnd(&stream); compressed_pixels = (unsigned char *)RelinquishMagickMemory( compressed_pixels); } #endif quantum_info = DestroyQuantumInfo(quantum_info); return (count); } static unsigned char * AcquireCompactPixels(const Image * image, ExceptionInfo * exception) { size_t packet_size; unsigned char *compact_pixels; packet_size = image->depth > 8UL ? 2UL : 1UL; compact_pixels = (unsigned char *)AcquireQuantumMemory((9 * image->columns) + 1, packet_size * sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); } return (compact_pixels); } static size_t WritePSDChannels(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, MagickOffsetType size_offset, const MagickBooleanType separate, ExceptionInfo * exception) { Image * mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count = 0; offset_length = 0; rows_offset = 0; compact_pixels = (unsigned char *)NULL; if (next_image->compression == RLECompression) { compact_pixels = AcquireCompactPixels(next_image, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } channels = 1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsImageGray(next_image) == MagickFalse) channels = next_image->colorspace == CMYKColorspace ? 
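/*
  The ZIP branch in WritePSDChannel() above follows zlib's standard
  chunked-deflate loop.  A condensed sketch under assumed names (in,
  in_len, a 16K buffer; error handling trimmed):

    z_stream zs;
    memset(&zs,0,sizeof(zs));
    if (deflateInit(&zs,Z_DEFAULT_COMPRESSION) == Z_OK)
      {
        unsigned char buffer[16384];
        zs.next_in=(Bytef *) in;
        zs.avail_in=(uInt) in_len;
        do
        {
          zs.next_out=(Bytef *) buffer;
          zs.avail_out=(uInt) sizeof(buffer);
          (void) deflate(&zs,Z_FINISH);   // all input given up front
          // emit sizeof(buffer)-zs.avail_out bytes from buffer
        } while (zs.avail_out == 0);
        (void) deflateEnd(&zs);
      }
*/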
4 : 3; if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, channels); offset_length = (next_image->rows * (psd_info->version == 1 ? 2 : 4)); } size_offset += 2; if (next_image->storage_class == PseudoClass) { length = WritePSDChannel(psd_info, image_info, image, next_image, IndexQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (IsImageGray(next_image) != MagickFalse) { length = WritePSDChannel(psd_info, image_info, image, next_image, GrayQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); length = WritePSDChannel(psd_info, image_info, image, next_image, RedQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, GreenQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, BlueQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; if (next_image->colorspace == CMYKColorspace) { length = WritePSDChannel(psd_info, image_info, image, next_image, BlackQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length = WritePSDChannel(psd_info, image_info, image, next_image, AlphaQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); if (separate != MagickFalse) { const char *property; property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels = AcquireCompactPixels(mask, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } length = WritePSDChannel(psd_info, image_info, image, mask, RedQuantum, compact_pixels, rows_offset, MagickTrue, exception); (void)WritePSDSize(psd_info, image, length, size_offset); count += length; compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); } } } return (count); } static size_t WritePascalString(Image * image, const char *value, size_t padding) { size_t count, length; register ssize_t 
i; /* * Max length is 255. */ count = 0; length = (strlen(value) > 255UL) ? 255UL : strlen(value); if (length == 0) count += WriteBlobByte(image, 0); else { count += WriteBlobByte(image, (unsigned char)length); count += WriteBlob(image, length, (const unsigned char *)value); } length++; if ((length % padding) == 0) return (count); for (i = 0; i < (ssize_t) (padding - (length % padding)); i++) count += WriteBlobByte(image, 0); return (count); } static void WriteResolutionResourceBlock(Image * image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution = 2.54 * 65536.0 * image->resolution.x + 0.5; y_resolution = 2.54 * 65536.0 * image->resolution.y + 0.5; units = 2; } else { x_resolution = 65536.0 * image->resolution.x + 0.5; y_resolution = 65536.0 * image->resolution.y + 0.5; units = 1; } (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x03ED); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, 16); /* resource size */ (void)WriteBlobMSBLong(image, (unsigned int)(x_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* horizontal resolution unit */ (void)WriteBlobMSBShort(image, units); /* width unit */ (void)WriteBlobMSBLong(image, (unsigned int)(y_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* vertical resolution unit */ (void)WriteBlobMSBShort(image, units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo * psd_info, Image * image, const signed short channel) { size_t count; count = WriteBlobMSBSignedShort(image, channel); count += SetPSDSize(psd_info, image, 0); return (count); } static void RemoveICCProfileFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < (datum + length - 16));) { register unsigned char *q; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); if (id == 0x0000040f) { ssize_t quantum; quantum = PSDQuantum(count) + 12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q + quantum < (datum + length - 16))) (void)CopyMagickMemory(q, q + quantum, length - quantum - (q - datum)); SetStringInfoLength(bim_profile, length - quantum); } break; } p += count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < (datum + length - 16));) { register unsigned char *q; ssize_t cnt; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) return; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); cnt = PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length - 12))) { (void)CopyMagickMemory(q, q + cnt + 12, length - (cnt + 12) - (q - 
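/*
  PSD layer names written by WritePascalString() above are Pascal strings:
  a length byte (clamped to 255) followed by the bytes, zero-padded so the
  total including the length byte is a multiple of the padding argument (4
  in the layer records here).  For example "Layer 1" occupies 1+7=8 bytes
  and needs no pad, while a 5-character name occupies 1+5=6 bytes and gets
  2 zero bytes of padding.
*/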
datum)); SetStringInfoLength(bim_profile, length - (cnt + 12)); break; } p += count; if ((count & 0x01) != 0) p++; } } static const StringInfo * GetAdditionalInformation(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* * Whitelist of keys from: * https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo * info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo * profile; unsigned char *p; unsigned int size; info = GetImageProfile(image, "psd:additional-info"); if (info == (const StringInfo *)NULL) return ((const StringInfo *)NULL); option = GetImageOption(image_info, "psd:additional-info"); if (LocaleCompare(option, "all") == 0) return (info); if (LocaleCompare(option, "selective") != 0) { profile = RemoveImageProfile(image, "psd:additional-info"); return (DestroyStringInfo(profile)); } length = GetStringInfoLength(info); p = GetStringInfoDatum(info); remaining_length = length; length = 0; while (remaining_length >= 12) { /* skip over signature */ p += 4; key[0] = (*p++); key[1] = (*p++); key[2] = (*p++); key[3] = (*p++); key[4] = '\0'; size = (unsigned int)(*p++) << 24; size |= (unsigned int)(*p++) << 16; size |= (unsigned int)(*p++) << 8; size |= (unsigned int)(*p++); size = size & 0xffffffff; remaining_length -= 12; if ((size_t) size > remaining_length) return ((const StringInfo *)NULL); found = MagickFalse; for (i = 0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key, allowed[i], PSDKeySize) != 0) continue; found = MagickTrue; break; } remaining_length -= (size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p = (unsigned char *)CopyMagickMemory(p - 12, p + size, remaining_length); continue; } length += (size_t) size + 12; p += size; } profile = RemoveImageProfile(image, "psd:additional-info"); if (length == 0) return (DestroyStringInfo(profile)); SetStringInfoLength(profile, (const size_t)length); SetImageProfile(image, "psd:additional-info", info, exception); return (profile); } static MagickBooleanType WritePSDImage(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo * icc_profile, *info; Image * base_image, *next_image; MagickBooleanType status; MagickOffsetType * layer_size_offsets, size_offset; PSDInfo psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo * bim_profile; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status = OpenBlob(image_info, image, WriteBinaryBlobMode, exception); if (status == MagickFalse) return (status); packet_size = (size_t) (image->depth > 8 ? 
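/*
  Each record walked by GetAdditionalInformation() above is laid out as a
  4-byte "8BIM" signature, a 4-byte key, a 4-byte big-endian payload size,
  then the payload.  A sketch of the walk with hypothetical names and the
  bounds checks trimmed:

    while (remaining >= 12)
    {
      unsigned int size=((unsigned int) p[8] << 24) |
        ((unsigned int) p[9] << 16) |
        ((unsigned int) p[10] << 8) |
         (unsigned int) p[11];          // big-endian payload length
      // p+4 points at the 4-character key, e.g. "luni" or "lyid"
      p+=12+size;                       // step to the next record
      remaining-=12+(size_t) size;
    }
*/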
6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size += image->depth > 8 ? 2 : 1; psd_info.version = 1; if ((LocaleCompare(image_info->magick, "PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version = 2; (void)WriteBlob(image, 4, (const unsigned char *)"8BPS"); (void)WriteBlobMSBShort(image, psd_info.version); /* version */ for (i = 1; i <= 6; i++) (void)WriteBlobByte(image, 0); /* 6 bytes of reserved */ if (SetImageGray(image, exception) != MagickFalse) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void)SetImageStorageClass(image, DirectClass, exception); if (image->colorspace != CMYKColorspace) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels = (image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void)WriteBlobMSBShort(image, (unsigned short)num_channels); (void)WriteBlobMSBLong(image, (unsigned int)image->rows); (void)WriteBlobMSBLong(image, (unsigned int)image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* * Write depth & mode. */ monochrome = IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void)WriteBlobMSBShort(image, (unsigned short)(image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void)TransformImageColorspace(image, sRGBColorspace, exception); (void)WriteBlobMSBShort(image, (unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void)TransformImageColorspace(image, CMYKColorspace, exception); (void)WriteBlobMSBShort(image, CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void)WriteBlobMSBLong(image, 0); else { /* * Write PSD raster colormap. */ (void)WriteBlobMSBLong(image, 768); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(image->colormap[i].red)); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar( image->colormap[i].green)); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(image->colormap[i].blue)); for (; i < 256; i++) (void)WriteBlobByte(image, 0); } /* * Image resource block. 
*/ length = 28; /* 0x03EB */ bim_profile = (StringInfo *) GetImageProfile(image, "8bim"); icc_profile = GetImageProfile(image, "icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile = CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length += PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *)NULL) length += PSDQuantum(GetStringInfoLength(icc_profile)) + 12; (void)WriteBlobMSBLong(image, (unsigned int)length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void)WriteBlob(image, GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile = DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x0000040F); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, (unsigned int)GetStringInfoLength( icc_profile)); (void)WriteBlob(image, GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((MagickOffsetType) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void)WriteBlobByte(image, 0); } base_image = GetNextImageInList(image); if (base_image == (Image *) NULL) base_image = image; size = 0; size_offset = TellBlob(image); SetPSDSize(&psd_info, image, 0); SetPSDSize(&psd_info, image, 0); layer_count = 0; for (next_image = base_image; next_image != NULL;) { layer_count++; next_image = GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size += WriteBlobMSBShort(image, -(unsigned short)layer_count); else size += WriteBlobMSBShort(image, (unsigned short)layer_count); layer_size_offsets = (MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count, sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); layer_index = 0; for (next_image = base_image; next_image != NULL;) { Image * mask; unsigned char default_color; unsigned short channels, total_channels; mask = (Image *) NULL; property = GetImageArtifact(next_image, "psd:opacity-mask"); default_color = 0; if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); default_color = strlen(property) == 9 ? 255 : 0; } size += WriteBlobMSBLong(image, (unsigned int)next_image->page.y); size += WriteBlobMSBLong(image, (unsigned int)next_image->page.x); size += WriteBlobMSBLong(image, (unsigned int)(next_image->page.y + next_image->rows)); size += WriteBlobMSBLong(image, (unsigned int)(next_image->page.x + next_image->columns)); channels = 1U; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels = next_image->colorspace == CMYKColorspace ? 
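/*
  For reference, the file header emitted above is the fixed 26-byte PSD
  header, all fields big-endian (hence the WriteBlobMSB* helpers): 4-byte
  "8BPS" signature, 2-byte version (1=PSD, 2=PSB), 6 reserved zero bytes,
  2-byte channel count (1-56), 4-byte rows, 4-byte columns, 2-byte depth
  (1, 8 or 16 here), and a 2-byte color mode such as 3 (RGB) or 4 (CMYK).
*/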
4U : 3U; total_channels = channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size += WriteBlobMSBShort(image, total_channels); layer_size_offsets[layer_index++] = TellBlob(image); for (i = 0; i < (ssize_t) channels; i++) size += WriteChannelSize(&psd_info, image, (signed short)i); if (next_image->alpha_trait != UndefinedPixelTrait) size += WriteChannelSize(&psd_info, image, -1); if (mask != (Image *) NULL) size += WriteChannelSize(&psd_info, image, -2); size += WriteBlob(image, 4, (const unsigned char *)"8BIM"); size += WriteBlob(image, 4, (const unsigned char *) CompositeOperatorToPSDBlendMode(next_image->compose)); property = GetImageArtifact(next_image, "psd:layer.opacity"); if (property != (const char *)NULL) { Quantum opacity; opacity = (Quantum) StringToInteger(property); size += WriteBlobByte(image, ScaleQuantumToChar(opacity)); (void)ApplyPSDLayerOpacity(next_image, opacity, MagickTrue, exception); } else size += WriteBlobByte(image, 255); size += WriteBlobByte(image, 0); size += WriteBlobByte(image, next_image->compose == NoCompositeOp ? 1 << 0x02 : 1); /* layer properties - * visible, etc. */ size += WriteBlobByte(image, 0); info = GetAdditionalInformation(image_info, next_image, exception); property = (const char *)GetImageProperty(next_image, "label", exception); if (property == (const char *)NULL) { (void)FormatLocaleString(layer_name, MagickPathExtent, "L%.20g", (double)layer_index); property = layer_name; } name_length = strlen(property) + 1; if ((name_length % 4) != 0) name_length += (4 - (name_length % 4)); if (info != (const StringInfo *)NULL) name_length += GetStringInfoLength(info); name_length += 8; if (mask != (Image *) NULL) name_length += 20; size += WriteBlobMSBLong(image, (unsigned int)name_length); if (mask == (Image *) NULL) size += WriteBlobMSBLong(image, 0); else { if (mask->compose != NoCompositeOp) (void)ApplyPSDOpacityMask(next_image, mask, ScaleCharToQuantum( default_color), MagickTrue, exception); mask->page.y += image->page.y; mask->page.x += image->page.x; size += WriteBlobMSBLong(image, 20); size += WriteBlobMSBSignedLong(image, mask->page.y); size += WriteBlobMSBSignedLong(image, mask->page.x); size += WriteBlobMSBLong(image, (const unsigned int)mask->rows + mask->page.y); size += WriteBlobMSBLong(image, (const unsigned int)mask->columns + mask->page.x); size += WriteBlobByte(image, default_color); size += WriteBlobByte(image, mask->compose == NoCompositeOp ? 2 : 0); size += WriteBlobMSBShort(image, 0); } size += WriteBlobMSBLong(image, 0); size += WritePascalString(image, property, 4); if (info != (const StringInfo *)NULL) size += WriteBlob(image, GetStringInfoLength(info), GetStringInfoDatum(info)); next_image = GetNextImageInList(next_image); } /* * Now the image data! */ next_image = base_image; layer_index = 0; while (next_image != NULL) { length = WritePSDChannels(&psd_info, image_info, image, next_image, layer_size_offsets[layer_index++], MagickTrue, exception); if (length == 0) { status = MagickFalse; break; } size += length; next_image = GetNextImageInList(next_image); } (void)WriteBlobMSBLong(image, 0); /* user mask data */ /* * Write the total size */ size_offset += WritePSDSize(&psd_info, image, size + (psd_info.version == 1 ? 
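/*
  The channel byte counts emitted by WriteChannelSize() above are zero
  placeholders: the true sizes are only known after compression, so
  WritePSDChannels() later seeks back to layer_size_offsets[i] and patches
  them via WritePSDSize().  The pattern, sketched with hypothetical names:

    MagickOffsetType patch_at=TellBlob(image); // remember the field's spot
    (void) SetPSDSize(&psd_info,image,0);      // reserve it with a zero
    // ... write the variable-length payload, tracking payload_length ...
    (void) WritePSDSize(&psd_info,image,payload_length,patch_at);
*/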
8 : 16), size_offset); if ((size / 2) != ((size + 1) / 2)) rounded_size = size + 1; else rounded_size = size; (void)WritePSDSize(&psd_info, image, rounded_size, size_offset); layer_size_offsets = (MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* * Remove the opacity mask from the registry */ next_image = base_image; while (next_image != (Image *) NULL) { property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) DeleteImageRegistry(property); next_image = GetNextImageInList(next_image); } /* * Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression = image->compression; if (image->compression == ZipCompression) image->compression = RLECompression; if (WritePSDChannels(&psd_info, image_info, image, image, 0, MagickFalse, exception) == 0) status = MagickFalse; image->compression = compression; } (void)CloseBlob(image); return (status); }
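/*
  A self-contained sketch (not part of the coder) of the width convention
  the PSD/PSB helpers above rely on: version 1 files store section lengths
  in 32 bits, version 2 (PSB) in 64 bits.  The names below are hypothetical
  and mirror GetPSDSize()/SetPSDSize().
*/
#include <stddef.h>
#include <stdint.h>

/* Read a big-endian, version-width PSD length field from a byte buffer. */
static uint64_t ReadPSDSizeSketch(const unsigned char *p,
  unsigned short version)
{
  size_t
    i,
    width;

  uint64_t
    value;

  width=(version == 1) ? 4 : 8;  /* PSD: 32-bit fields, PSB: 64-bit */
  value=0;
  for (i=0; i < width; i++)
    value=(value << 8) | (uint64_t) p[i];
  return(value);
}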
/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
#define MaxPSDChannels  56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
typedef struct _ChannelInfo
{
  short int
    type;

  size_t
    size;
} ChannelInfo;

typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[256],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}
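/*
  A minimal usage sketch of the signature test above; the helper name is
  hypothetical and the check simply mirrors IsPSD().  Any blob whose first
  four bytes are "8BPS" is claimed by this coder, for both PSD and PSB.
*/
#include <string.h>

static int LooksLikePSDSketch(const unsigned char *blob,size_t length)
{
  return((length >= 4) && (memcmp(blob,"8BPS",4) == 0));
}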
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  const char
    *blend_mode;

  switch (op)
  {
    case ColorBurnCompositeOp:
      blend_mode="idiv";
      break;
    case ColorDodgeCompositeOp:
      blend_mode="div ";
      break;
    case ColorizeCompositeOp:
      blend_mode="colr";
      break;
    case DarkenCompositeOp:
      blend_mode="dark";
      break;
    case DifferenceCompositeOp:
      blend_mode="diff";
      break;
    case DissolveCompositeOp:
      blend_mode="diss";
      break;
    case ExclusionCompositeOp:
      blend_mode="smud";
      break;
    case HardLightCompositeOp:
      blend_mode="hLit";
      break;
    case HardMixCompositeOp:
      blend_mode="hMix";
      break;
    case HueCompositeOp:
      blend_mode="hue ";
      break;
    case LightenCompositeOp:
      blend_mode="lite";
      break;
    case LinearBurnCompositeOp:
      blend_mode="lbrn";
      break;
    case LinearDodgeCompositeOp:
      blend_mode="lddg";
      break;
    case LinearLightCompositeOp:
      blend_mode="lLit";
      break;
    case LuminizeCompositeOp:
      blend_mode="lum ";
      break;
    case MultiplyCompositeOp:
      blend_mode="mul ";
      break;
    case OverCompositeOp:
      blend_mode="norm";
      break;
    case OverlayCompositeOp:
      blend_mode="over";
      break;
    case PinLightCompositeOp:
      blend_mode="pLit";
      break;
    case SaturateCompositeOp:
      blend_mode="sat ";
      break;
    case ScreenCompositeOp:
      blend_mode="scrn";
      break;
    case SoftLightCompositeOp:
      blend_mode="sLit";
      break;
    case VividLightCompositeOp:
      blend_mode="vLit";
      break;
    default:
      blend_mode="norm";
  }
  return(blend_mode);
}

/*
  For some reason Photoshop seems to blend semi-transparent pixels with
  white.  This method reverts the blending.  This can be disabled by
  setting the option 'psd:alpha-unblend' to off.
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return (MagickTrue); option = GetImageOption(image_info, "psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return (MagickTrue); status = MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma = QuantumScale * GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); if (channel != AlphaPixelChannel) q[i] = ClampToQuantum((q[i] - ((1.0 - gamma) * QuantumRange)) / gamma); } } q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image * image, Quantum opacity, MagickBooleanType revert, ExceptionInfo * exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying layer opacity %.20g", (double)opacity); if (opacity == OpaqueAlpha) return (MagickTrue); image->alpha_trait = BlendPixelTrait; status = MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image, (Quantum) (QuantumScale * (GetPixelAlpha(image, q)) * opacity), q); else if (opacity > 0) SetPixelAlpha(image, (Quantum) (QuantumRange * (GetPixelAlpha(image, q) / (MagickRealType) opacity)), q); q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static MagickBooleanType ApplyPSDOpacityMask(Image * image, const Image * mask, Quantum background, MagickBooleanType revert, ExceptionInfo * exception) { Image * complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying opacity mask"); complete_mask = CloneImage(image, image->columns, image->rows, MagickTrue, exception); complete_mask->alpha_trait = BlendPixelTrait; GetPixelInfo(complete_mask, &color); color.red = background; SetImageColor(complete_mask, &color, exception); status = CompositeImage(complete_mask, mask, OverCompositeOp, MagickTrue, 
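/*
  The unblend step in CorrectPSDAlphaBlend() above inverts a composite
  over white: Photoshop appears to store gamma*c+(1-gamma)*QuantumRange,
  so the original channel is recovered as
  c=(stored-(1-gamma)*QuantumRange)/gamma.  Worked at 8-bit scale
  (QuantumRange 255): stored 200 with gamma 0.5 gives (200-127.5)/0.5=145.
*/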
mask->page.x - image->page.x, mask->page.y - image->page.y, exception); if (status == MagickFalse) { complete_mask = DestroyImage(complete_mask); return (status); } image->alpha_trait = BlendPixelTrait; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register Quantum * p; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); p = GetAuthenticPixels(complete_mask, 0, y, complete_mask->columns, 1, exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha = GetPixelAlpha(image, q); intensity = GetPixelIntensity(complete_mask, p); if (revert == MagickFalse) SetPixelAlpha(image, ClampToQuantum(intensity * (QuantumScale * alpha)), q); else if (intensity > 0) SetPixelAlpha(image, ClampToQuantum((alpha / intensity) * QuantumRange), q); q += GetPixelChannels(image); p += GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } complete_mask = DestroyImage(complete_mask); return (status); } static void PreservePSDOpacityMask(Image * image, LayerInfo * layer_info, ExceptionInfo * exception) { char *key; RandomInfo * random_info; StringInfo * key_info; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " preserving opacity mask"); random_info = AcquireRandomInfo(); key_info = GetRandomKey(random_info, 2 + 1); key = (char *)GetStringInfoDatum(key_info); key[8] = layer_info->mask.background; key[9] = '\0'; layer_info->mask.image->page.x += layer_info->page.x; layer_info->mask.image->page.y += layer_info->page.y; (void)SetImageRegistry(ImageRegistryType, (const char *)key, layer_info->mask.image, exception); (void)SetImageArtifact(layer_info->image, "psd:opacity-mask", (const char *)key); key_info = DestroyStringInfo(key_info); random_info = DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels, const ssize_t depth, const size_t number_pixels, unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets = (ssize_t) number_compact_pixels; for (i = 0; (packets > 1) && (i < (ssize_t) number_pixels);) { packets--; length = (size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length = 256 - length + 1; CheckNumberCompactPixels; pixel = (*compact_pixels++); for (j = 0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (pixel >> 7) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 6) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 5) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 4) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 3) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 2) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 1) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (unsigned char)((pixel >> 6) & 0x03); *pixels++ = (unsigned char)((pixel >> 4) & 0x03); *pixels++ = (unsigned char)((pixel >> 2) & 0x03); *pixels++ = (unsigned char)((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++ = (unsigned char)((pixel >> 4) & 0xff); *pixels++ = (unsigned char)((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++ = (unsigned char)pixel; break; } } } continue; } length++; for (j = 0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (*compact_pixels >> 6) & 0x03; *pixels++ = (*compact_pixels >> 4) & 0x03; *pixels++ = (*compact_pixels >> 2) & 0x03; *pixels++ = (*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++ = (*compact_pixels >> 4) & 0xff; *pixels++ = (*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++ = (*compact_pixels); break; } } compact_pixels++; } } return (i); } static inline LayerInfo * DestroyLayerInfo(LayerInfo * layer_info, const ssize_t number_layers) { ssize_t i; for (i = 0; i < number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image = DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image = DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(Image * image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return (2); else if (image->depth > 8) return (2); } else if (image->depth > 8) return (2); return (1); } static inline MagickSizeType GetPSDSize(const PSDInfo * psd_info, Image * image) { if (psd_info->version == 1) return ((MagickSizeType) ReadBlobLong(image)); return ((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image * image) { if (image->depth == 1) return (((image->columns + 7) / 8) * GetPSDPacketSize(image)); else return (image->columns * GetPSDPacketSize(image)); } static const char * ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image * image, ExceptionInfo * exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask = SetImageChannelMask(image, (ChannelType) (AllChannels & ~ AlphaChannel)); status = NegateImage(image, MagickFalse, exception); (void)SetImageChannelMask(image, channel_mask); return (status); } static void ParseImageResourceBlocks(Image * image, const unsigned char *blocks, size_t length, MagickBooleanType * has_merged_image, 
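/*
  Two resource IDs matter in the parse below: 0x03ED, the resolution
  resource (horizontal and vertical densities as 16.16 fixed-point values
  with display-unit fields; only the integer part is consumed here), and
  0x0421, the version-info resource, whose fifth byte is the
  "has real merged data" flag - when it is zero the flattened composite at
  the end of the file cannot be relied on, so *has_merged_image is
  cleared.
*/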
ExceptionInfo * exception) { const unsigned char *p; StringInfo * profile; unsigned int count, long_sans; unsigned short id, short_sans; if (length < 16) return; profile = BlobToStringInfo((const unsigned char *)NULL, length); SetStringInfoDatum(profile, blocks); (void)SetImageProfile(image, "8bim", profile, exception); profile = DestroyStringInfo(profile); for (p = blocks; (p >= blocks) && (p < (blocks + length - 16));) { if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); if ((p + count) > (blocks + length - 16)) return; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* * Resolution info. */ p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.x = (double)resolution; (void)FormatLocaleString(value, MagickPathExtent, "%g", image->resolution.x); (void)SetImageProperty(image, "tiff:XResolution", value, exception); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.y = (double)resolution; (void)FormatLocaleString(value, MagickPathExtent, "%g", image->resolution.y); (void)SetImageProperty(image, "tiff:YResolution", value, exception); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); image->units = PixelsPerInchResolution; break; } case 0x0421: { if (*(p + 4) == 0) *has_merged_image = MagickFalse; p += count; break; } default: { p += count; break; } } if ((count & 0x01) != 0) p++; } return; } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *)NULL) return (OverCompositeOp); if (LocaleNCompare(mode, "norm", 4) == 0) return (OverCompositeOp); if (LocaleNCompare(mode, "mul ", 4) == 0) return (MultiplyCompositeOp); if (LocaleNCompare(mode, "diss", 4) == 0) return (DissolveCompositeOp); if (LocaleNCompare(mode, "diff", 4) == 0) return (DifferenceCompositeOp); if (LocaleNCompare(mode, "dark", 4) == 0) return (DarkenCompositeOp); if (LocaleNCompare(mode, "lite", 4) == 0) return (LightenCompositeOp); if (LocaleNCompare(mode, "hue ", 4) == 0) return (HueCompositeOp); if (LocaleNCompare(mode, "sat ", 4) == 0) return (SaturateCompositeOp); if (LocaleNCompare(mode, "colr", 4) == 0) return (ColorizeCompositeOp); if (LocaleNCompare(mode, "lum ", 4) == 0) return (LuminizeCompositeOp); if (LocaleNCompare(mode, "scrn", 4) == 0) return (ScreenCompositeOp); if (LocaleNCompare(mode, "over", 4) == 0) return (OverlayCompositeOp); if (LocaleNCompare(mode, "hLit", 4) == 0) return (HardLightCompositeOp); if (LocaleNCompare(mode, "sLit", 4) == 0) return (SoftLightCompositeOp); if (LocaleNCompare(mode, "smud", 4) == 0) return (ExclusionCompositeOp); if (LocaleNCompare(mode, "div ", 4) == 0) return (ColorDodgeCompositeOp); if (LocaleNCompare(mode, "idiv", 4) == 0) return (ColorBurnCompositeOp); if (LocaleNCompare(mode, "lbrn", 4) == 0) return (LinearBurnCompositeOp); if (LocaleNCompare(mode, "lddg", 4) == 0) return (LinearDodgeCompositeOp); if (LocaleNCompare(mode, "lLit", 4) == 0) return (LinearLightCompositeOp); if (LocaleNCompare(mode, "vLit", 4) == 0) return (VividLightCompositeOp); if (LocaleNCompare(mode, "pLit", 4) == 0) return (PinLightCompositeOp); if (LocaleNCompare(mode, "hMix", 4) == 0) 
return (HardMixCompositeOp); return (OverCompositeOp); } static inline void ReversePSDString(Image * image, char *p, size_t length) { char *q; if (image->endian == MSBEndian) return; q = p + length; for (--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image * image, const size_t channels, const ssize_t type, const size_t packet_size, const Quantum pixel, Quantum * q, ExceptionInfo * exception) { if (image->storage_class == PseudoClass) { if (packet_size == 1) SetPixelIndex(image, ScaleQuantumToChar(pixel), q); else SetPixelIndex(image, ScaleQuantumToShort(pixel), q); SetPixelViaPixelInfo(image, image->colormap + (ssize_t) ConstrainColormapIndex(image, GetPixelIndex(image, q), exception), q); return; } switch (type) { case -1: { SetPixelAlpha(image, pixel, q); break; } case -2: case 0: { SetPixelRed(image, pixel, q); if (channels == 1 || type == -2) SetPixelGray(image, pixel, q); break; } case 1: { if (image->storage_class == PseudoClass) SetPixelAlpha(image, pixel, q); else SetPixelGreen(image, pixel, q); break; } case 2: { if (image->storage_class == PseudoClass) SetPixelAlpha(image, pixel, q); else SetPixelBlue(image, pixel, q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image, pixel, q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image * image, const size_t channels, const size_t row, const ssize_t type, const unsigned char *pixels, ExceptionInfo * exception) { Quantum pixel; register const unsigned char *p; register Quantum * q; register ssize_t x; size_t packet_size; unsigned short nibble; p = pixels; q = GetAuthenticPixels(image, 0, row, image->columns, 1, exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size = GetPSDPacketSize(image); for (x = 0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel = ScaleCharToQuantum(*p++); else { p = PushShortPixel(MSBEndian, p, &nibble); pixel = ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image, channels, type, packet_size, pixel, q, exception); q += GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits = image->columns - x; if (number_bits > 8) number_bits = 8; for (bit = 0; bit < number_bits; bit++) { SetPSDPixel(image, channels, type, packet_size, (((unsigned char)pixel) & (0x01 << (7 - bit))) != 0 ? 
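/*
  At depth 1 each source byte packs eight pixels, most significant bit
  first, and PSD bitmap data is ink coverage, so a set bit maps to black.
  The same expansion, sketched standalone with hypothetical names:

    // expand one packed byte into up to eight 8-bit gray samples
    for (bit=0; (bit < 8) && ((x+bit) < width); bit++)
      row[x+bit]=(byte & (0x01 << (7-bit))) ? 0 : 255;  // 1 = black ink
*/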
0 : QuantumRange, q, exception); q += GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return (SyncAuthenticPixels(image, exception)); } static MagickBooleanType ReadPSDChannelRaw(Image * image, const size_t channels, const ssize_t type, ExceptionInfo * exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RAW"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, row_size, pixels); if (count != row_size) break; status = ReadPSDChannelPixels(image, channels, y, type, pixels, exception); if (status == MagickFalse) break; } pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } static inline MagickOffsetType * ReadPSDRLESizes(Image * image, const PSDInfo * psd_info, const size_t size) { MagickOffsetType * sizes; ssize_t y; sizes = (MagickOffsetType *) AcquireQuantumMemory(size, sizeof(*sizes)); if (sizes != (MagickOffsetType *) NULL) { for (y = 0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y] = (MagickOffsetType) ReadBlobShort(image); else sizes[y] = (MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image * image, const PSDInfo * psd_info, const ssize_t type, MagickOffsetType * sizes, ExceptionInfo * exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RLE compressed"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); length = 0; for (y = 0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length = (size_t) sizes[y]; if (length > row_size + 256) //arbitrary number { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "InvalidLength", image->filename); } compact_pixels = (unsigned char *)AcquireQuantumMemory(length, sizeof(*pixels)); if (compact_pixels == (unsigned char *)NULL) { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)ResetMagickMemory(compact_pixels, 0, length * sizeof(*compact_pixels)); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, (size_t) sizes[y], compact_pixels); if (count != (ssize_t) sizes[y]) break; count = DecodePSDPixels((size_t) sizes[y], compact_pixels, (ssize_t) (image->depth == 1 ? 
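/*
  RLE channels are prefixed by the scanline size table read by
  ReadPSDRLESizes() above: one big-endian byte count per row, 2 bytes wide
  in version 1 and 4 bytes wide in version 2.  Sketch with hypothetical
  reader helpers:

    for (y=0; y < rows; y++)
      row_bytes[y]=(version == 1) ? read_u16_be(blob) : read_u32_be(blob);

  The otherwise puzzling 123456 depth argument in the DecodePSDPixels()
  call here appears deliberate: any value other than 1 routes depth-1 data
  through the byte-copy default case, so packed bits are expanded only
  once, later, in ReadPSDChannelPixels().
*/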
123456 : image->depth), row_size, pixels); if (count != (ssize_t) row_size) break; status = ReadPSDChannelPixels(image, psd_info->channels, y, type, pixels, exception); if (status == MagickFalse) break; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image * image, const size_t channels, const ssize_t type, const PSDCompressionType compression, const size_t compact_size, ExceptionInfo * exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is ZIP compressed"); compact_pixels = (unsigned char *)AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); packet_size = GetPSDPacketSize(image); row_size = image->columns * packet_size; count = image->rows * row_size; pixels = (unsigned char *)AcquireQuantumMemory(count, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) { compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } ResetMagickMemory(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; (void)ReadBlob(image, compact_size, compact_pixels); stream.next_in = (Bytef *) compact_pixels; stream.avail_in = (uInt) compact_size; stream.next_out = (Bytef *) pixels; stream.avail_out = (uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret = inflate(&stream, Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (MagickFalse); } } } if (compression == ZipWithPrediction) { p = pixels; while (count > 0) { length = image->columns; while (--length) { if (packet_size == 2) { p[2] += p[0] + ((p[1] + p[3]) >> 8); p[3] += p[1]; } else *(p + 1) += *p; p += packet_size; } p += packet_size; count -= row_size; } } status = MagickTrue; p = pixels; for (y = 0; y < (ssize_t) image->rows; y++) { status = ReadPSDChannelPixels(image, channels, y, type, p, exception); if (status == MagickFalse) break; p += row_size; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #endif static MagickBooleanType ReadPSDChannel(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, const size_t channel, const PSDCompressionType compression, ExceptionInfo * exception) { Image * channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image = image; mask = (Image *) NULL; if (layer_info->channel_info[channel].type < -1) { const char *option; /* * Ignore mask that is not a user supplied layer mask, if the mask is * disabled or if the flags have unsupported values. 
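  As background for the flag tests below: in the layer-mask record, bit 0
  of the flags byte marks the mask position as relative to the layer, bit
  1 marks the mask as disabled, and bit 2 asks for the mask to be inverted
  when blending; anything above 2 is treated as unsupported here, and a
  disabled mask is only kept when psd:preserve-opacity-mask is set.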
*/ option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image, layer_info->channel_info[channel].size - 2, SEEK_CUR); return (MagickTrue); } mask = CloneImage(image, layer_info->mask.page.width, layer_info->mask.page.height, MagickFalse, exception); SetImageType(mask, GrayscaleType, exception); channel_image = mask; } offset = TellBlob(image); status = MagickTrue; switch (compression) { case Raw: status = ReadPSDChannelRaw(channel_image, psd_info->channels, layer_info->channel_info[channel].type, exception); break; case RLE: { MagickOffsetType * sizes; sizes = ReadPSDRLESizes(channel_image, psd_info, channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); status = ReadPSDChannelRLE(channel_image, psd_info, layer_info->channel_info[channel].type, sizes, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status = ReadPSDChannelZip(channel_image, layer_info->channels, layer_info->channel_info[channel].type, compression, layer_info->channel_info[channel].size - 2, exception); #else (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)", image->filename); #endif break; default: (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); break; } SeekBlob(image, offset + layer_info->channel_info[channel].size - 2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError, "UnableToDecompressImage", image->filename); } layer_info->mask.image = mask; return (status); } static MagickBooleanType ReadPSDLayer(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, ExceptionInfo * exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void)SetImageBackgroundColor(layer_info->image, exception); layer_info->image->compose = PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose = NoCompositeOp; if (psd_info->mode == CMYKMode) SetImageColorspace(layer_info->image, CMYKColorspace, exception); else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) || (psd_info->mode == GrayscaleMode)) SetImageColorspace(layer_info->image, GRAYColorspace, exception); /* * Set up some hidden attributes for folks that need them. 
*/ (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.x); (void)SetImageArtifact(layer_info->image, "psd:layer.x", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.y); (void)SetImageArtifact(layer_info->image, "psd:layer.y", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double) layer_info->opacity); (void)SetImageArtifact(layer_info->image, "psd:layer.opacity", message); (void)SetImageProperty(layer_info->image, "label", (char *)layer_info->name, exception); status = MagickTrue; for (j = 0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for channel %.20g", (double)j); compression = (PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression = ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait = BlendPixelTrait; status = ReadPSDChannel(layer_info->image, image_info, psd_info, layer_info, j, compression, exception); if (status == MagickFalse) break; } if (status != MagickFalse) status = ApplyPSDLayerOpacity(layer_info->image, layer_info->opacity, MagickFalse, exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status = NegateCMYK(layer_info->image, exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x = layer_info->mask.page.x; layer_info->mask.image->page.y = layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose = NoCompositeOp; else status = ApplyPSDOpacityMask(layer_info->image, layer_info->mask.image, layer_info->mask.background == 0 ? 0 : QuantumRange, MagickFalse, exception); option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image, layer_info, exception); layer_info->mask.image = DestroyImage(layer_info->mask.image); } return (status); } ModuleExport MagickBooleanType ReadPSDLayers(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, const MagickBooleanType skip_layers, ExceptionInfo * exception) { char type[4]; LayerInfo * layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size = GetPSDSize(psd_info, image); if (size == 0) { /* * Skip layers & masks. */ (void)ReadBlobLong(image); count = ReadBlob(image, 4, (unsigned char *)type); ReversePSDString(image, type, 4); status = MagickFalse; if ((count == 0) || (LocaleNCompare(type, "8BIM", 4) != 0)) return (MagickTrue); else { count = ReadBlob(image, 4, (unsigned char *)type); ReversePSDString(image, type, 4); if ((count != 0) && (LocaleNCompare(type, "Lr16", 4) == 0)) size = GetPSDSize(psd_info, image); else return (MagickTrue); } } status = MagickTrue; if (size != 0) { layer_info = (LayerInfo *) NULL; number_layers = (short)ReadBlobShort(image); if (number_layers < 0) { /* * The first alpha channel in the merged result contains the * transparency data for the merged result. 
*/ number_layers = MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " negative layer count corrected for"); image->alpha_trait = BlendPixelTrait; } /* * We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return (MagickTrue); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image contains %.20g layers", (double)number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError, "InvalidNumberOfLayers", image->filename); layer_info = (LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)ResetMagickMemory(layer_info, 0, (size_t) number_layers * sizeof(*layer_info)); for (i = 0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading layer #%.20g", (double)i + 1); layer_info[i].page.y = ReadBlobSignedLong(image); layer_info[i].page.x = ReadBlobSignedLong(image); y = ReadBlobSignedLong(image); x = ReadBlobSignedLong(image); layer_info[i].page.width = (size_t) (x - layer_info[i].page.x); layer_info[i].page.height = (size_t) (y - layer_info[i].page.y); layer_info[i].channels = ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double)layer_info[i].page.x, (double)layer_info[i].page.y, (double)layer_info[i].page.height, (double) layer_info[i].page.width, (double)layer_info[i].channels); for (j = 0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type = (short)ReadBlobShort(image); layer_info[i].channel_info[j].size = (size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g", (double)j, (double)layer_info[i].channel_info[j].type, (double)layer_info[i].channel_info[j].size); } count = ReadBlob(image, 4, (unsigned char *)type); ReversePSDString(image, type, 4); if ((count == 0) || (LocaleNCompare(type, "8BIM", 4) != 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } count = ReadBlob(image, 4, (unsigned char *)layer_info[i].blendkey); ReversePSDString(image, layer_info[i].blendkey, 4); layer_info[i].opacity = (Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping = (unsigned char)ReadBlobByte(image); layer_info[i].flags = (unsigned char)ReadBlobByte(image); layer_info[i].visible = !(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey, (double)layer_info[i].opacity, layer_info[i].clipping ? "true" : "false", layer_info[i].flags, layer_info[i].visible ? 
"true" : "false"); (void)ReadBlobByte(image); /* filler */ size = ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer contains additional info"); length = ReadBlobLong(image); combined_length = length + 4; if (length != 0) { /* * Layer mask info. */ layer_info[i].mask.page.y = ReadBlobSignedLong(image); layer_info[i].mask.page.x = ReadBlobSignedLong(image); layer_info[i].mask.page.height = (size_t) (ReadBlobLong(image) - layer_info[i].mask.page.y); layer_info[i].mask.page.width = (size_t) (ReadBlobLong(image) - layer_info[i].mask.page.x); layer_info[i].mask.background = (unsigned char)ReadBlobByte( image); layer_info[i].mask.flags = (unsigned char)ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y = layer_info[i].mask.page.y - layer_info[i].page.y; layer_info[i].mask.page.x = layer_info[i].mask.page.x - layer_info[i].page.x; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double)layer_info[i].mask.page.x, (double) layer_info[i].mask.page.y, (double)layer_info[i].mask.page.width, (double)layer_info[i].mask.page.height, (double) ((MagickOffsetType) length) - 18); /* * Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image, (MagickSizeType) (length - 18)) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = ReadBlobLong(image); combined_length += length + 4; if (length != 0) { /* * Layer blending ranges info. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer blending ranges: length=%.20g", (double) ((MagickOffsetType) length)); /* * We read it, but don't use it... */ for (j = 0; j < (ssize_t) length; j += 8) { size_t blend_source = ReadBlobLong(image); size_t blend_dest = ReadBlobLong(image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " source(%x), dest(%x)", (unsigned int) blend_source, (unsigned int)blend_dest); } } /* * Layer name. */ length = (MagickSizeType) (unsigned char)ReadBlobByte(image); combined_length += length + 1; if (length > 0) (void)ReadBlob(image, (size_t) length++, layer_info[i].name); layer_info[i].name[length] = '\0'; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer name: %s", layer_info[i].name); if ((length % 4) != 0) { length = 4 - (length % 4); combined_length += length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image, length) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = (MagickSizeType) size - combined_length; if (length > 0) { unsigned char *info; layer_info[i].info = AcquireStringInfo((const size_t)length); info = GetStringInfoDatum(layer_info[i].info); (void)ReadBlob(image, (const size_t)length, info); } } } for (i = 0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); continue; } /* * Allocate layered image. 
*/ layer_info[i].image = CloneImage(image, layer_info[i].page.width, layer_info[i].page.height, MagickFalse, exception); if (layer_info[i].image == (Image *) NULL) { layer_info = DestroyLayerInfo(layer_info, number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of image for layer %.20g failed", (double)i); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void)SetImageProfile(layer_info[i].image, "psd:additional-info", layer_info[i].info, exception); layer_info[i].info = DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i = 0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j = 0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image, (MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } continue; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for layer %.20g", (double)i); status = ReadPSDLayer(image, image_info, psd_info, &layer_info[i], exception); if (status == MagickFalse) break; status = SetImageProgress(image, LoadImagesTag, i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i = 0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j = i; j < number_layers - 1; j++) layer_info[j] = layer_info[j + 1]; number_layers--; i--; } } if (number_layers > 0) { for (i = 0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous = layer_info[i - 1].image; if (i < (number_layers - 1)) layer_info[i].image->next = layer_info[i + 1].image; layer_info[i].image->page = layer_info[i].page; } image->next = layer_info[0].image; layer_info[0].image->previous = image; } layer_info = (LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info = DestroyLayerInfo(layer_info, number_layers); } return (status); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo * image_info, Image * image, const PSDInfo * psd_info, ExceptionInfo * exception) { MagickOffsetType * sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression = (PSDCompressionType) ReadBlobMSBShort(image); image->compression = ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); return (MagickFalse); } sizes = (MagickOffsetType *) NULL; if (compression == RLE) { sizes = ReadPSDRLESizes(image, psd_info, image->rows * psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } status = MagickTrue; for (i = 0; i < (ssize_t) psd_info->channels; i++) { if (compression == RLE) status = ReadPSDChannelRLE(image, psd_info, i, sizes + (i * image->rows), exception); else status = ReadPSDChannelRaw(image, psd_info->channels, i, exception); if (status != MagickFalse) status = SetImageProgress(image, LoadImagesTag, i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status = NegateCMYK(image, exception); if (status != MagickFalse) status = 
CorrectPSDAlphaBlend(image_info, image, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); return (status); } static Image * ReadPSDImage(const ImageInfo * image_info, ExceptionInfo * exception) { Image * image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; ssize_t count; unsigned char *data; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image = AcquireImage(image_info, exception); status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image = DestroyImageList(image); return ((Image *) NULL); } /* * Read image header. */ image->endian = MSBEndian; count = ReadBlob(image, 4, (unsigned char *)psd_info.signature); psd_info.version = ReadBlobMSBShort(image); if ((count == 0) || (LocaleNCompare(psd_info.signature, "8BPS", 4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); (void)ReadBlob(image, 6, psd_info.reserved); psd_info.channels = ReadBlobMSBShort(image); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError, "MaximumChannelsExceeded"); psd_info.rows = ReadBlobMSBLong(image); psd_info.columns = ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.depth = ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.mode = ReadBlobMSBShort(image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double)psd_info.columns, (double)psd_info.rows, (double) psd_info.channels, (double)psd_info.depth, ModeToString((PSDImageType) psd_info.mode)); /* * Initialize image. */ image->depth = psd_info.depth; image->columns = psd_info.columns; image->rows = psd_info.rows; status = SetImageExtent(image, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImageList(image)); if (SetImageBackgroundColor(image, exception) == MagickFalse) { image = DestroyImageList(image); return ((Image *) NULL); } if (psd_info.mode == LabMode) SetImageColorspace(image, LabColorspace, exception); if (psd_info.mode == CMYKMode) { SetImageColorspace(image, CMYKColorspace, exception); if (psd_info.channels > 4) SetImageAlphaChannel(image, ActivateAlphaChannel, exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { status = AcquireImageColormap(image, psd_info.depth != 16 ? 
256 : 65536, exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image colormap allocated"); SetImageColorspace(image, GRAYColorspace, exception); if (psd_info.channels > 1) SetImageAlphaChannel(image, ActivateAlphaChannel, exception); } else if (psd_info.channels > 3) SetImageAlphaChannel(image, ActivateAlphaChannel, exception); /* * Read PSD raster colormap only present for indexed and duotone images. */ length = ReadBlobMSBLong(image); if (length != 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading colormap"); if (psd_info.mode == DuotoneMode) { /* * Duotone image data; the format of this data is undocumented. */ data = (unsigned char *)AcquireQuantumMemory((size_t) length, sizeof(*data)); if (data == (unsigned char *)NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); (void)ReadBlob(image, (size_t) length, data); data = (unsigned char *)RelinquishMagickMemory(data); } else { size_t number_colors; /* * Read PSD raster colormap. */ number_colors = length / 3; if (number_colors > 65536) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (AcquireImageColormap(image, number_colors, exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].red = ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].green = ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].blue = ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->alpha_trait = UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image = MagickTrue; length = ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* * Image resources block. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading image resource blocks - %.20g bytes", (double) ((MagickOffsetType) length)); blocks = (unsigned char *)AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *)NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); count = ReadBlob(image, (size_t) length, blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *)blocks, "8BIM", 4) != 0)) { blocks = (unsigned char *)RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } ParseImageResourceBlocks(image, blocks, (size_t) length, &has_merged_image, exception); blocks = (unsigned char *)RelinquishMagickMemory(blocks); } /* * Layer and mask block. 
*/ length = GetPSDSize(&psd_info, image); if (length == 8) { length = ReadBlobMSBLong(image); length = ReadBlobMSBLong(image); } offset = TellBlob(image); skip_layers = MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " read composite only"); skip_layers = MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayers(image, image_info, &psd_info, skip_layers, exception) != MagickTrue) { (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } /* * Skip the rest of the layer and mask information. */ SeekBlob(image, offset + length, SEEK_SET); } /* * If we are only "pinging" the image, then we're done - so return. */ if (image_info->ping != MagickFalse) { (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading the precombined layer"); if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1)) has_merged_image = (MagickBooleanType) ReadPSDMergedImage(image_info, image, &psd_info, exception); if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) && (length != 0)) { SeekBlob(image, offset, SEEK_SET); status = ReadPSDLayers(image, image_info, &psd_info, MagickFalse, exception); if (status != MagickTrue) { (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image * merged; if (GetImageListLength(image) == 1) ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); SetImageAlphaChannel(image, TransparentAlphaChannel, exception); image->background_color.alpha = TransparentAlpha; image->background_color.alpha_trait = BlendPixelTrait; merged = MergeImageLayers(image, FlattenLayer, exception); ReplaceImageInList(&image, merged); } (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R e g i s t e r P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RegisterPSDImage() adds properties for the PSD image format to % * the list of supported formats. The properties include the image format % * tag, a method to read and/or write the format, whether the format % * supports the saving of more than one frame to the same file or blob, % * whether the format supports native in-memory I/O, and a brief % * description of the format. 
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the PSD module
%  from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image
%  format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
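/*
  Aside: the helpers below hide the main layout difference between PSD
  (version 1) and PSB (version 2) files: per-row RLE byte counts are 2 bytes
  wide in PSD and 4 bytes in PSB, and section sizes are 4 bytes in PSD and
  8 bytes in PSB.  The hypothetical helper below is only an illustrative
  sketch, not part of this coder; it computes the byte length of the RLE
  count table that WriteCompressionStart() reserves and WritePSDChannel()
  later backpatches, mirroring the offset_length computation in
  WritePSDChannels().
*/
static size_t PSDRLETableLengthSketch(const size_t rows,const size_t channels,
  const unsigned short version)
{
  /* one count per row per channel; the entry width depends on the version */
  return(rows*channels*(version == 1 ? 2 : 4));
}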
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
    result=WriteBlobMSBLongLong(image,size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
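/*
  Aside: PSDPackbitsEncodeImage() above emits Apple PackBits: a header byte
  n in 0..127 introduces a literal run of n+1 raw bytes, a header byte n in
  129..255 introduces a packed run of 257-n copies of the next byte, and 128
  marks end-of-data.  The decoder below is only an illustrative sketch of
  that scheme; the name is hypothetical and it is not the coder's real
  counterpart, DecodePSDPixels(), which additionally handles 1-bit rows.
  It assumes <string.h> for memcpy()/memset().
*/
static size_t PackbitsDecodeSketch(const unsigned char *src,
  const size_t src_length,unsigned char *dst,const size_t dst_length)
{
  size_t
    i,
    o;

  i=0;
  o=0;
  while (i < src_length)
  {
    unsigned char
      header;

    header=src[i++];
    if (header == 128)
      continue;  /* no-op / end-of-data marker */
    if (header < 128)
      {
        size_t run = (size_t) header+1;  /* literal run: copy verbatim */

        if ((i+run > src_length) || (o+run > dst_length))
          break;
        (void) memcpy(dst+o,src+i,run);
        i+=run;
        o+=run;
        continue;
      }
    {
      size_t run = 257-(size_t) header;  /* packed run: replicate one byte */

      if ((i >= src_length) || (o+run > dst_length))
        break;
      (void) memset(dst+o,(int) src[i++],run);
      o+=run;
    }
  }
  return(o);
}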
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (next_image->compression == RLECompression)
    {
      length=WriteBlobMSBShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (next_image->compression == ZipCompression)
    length=WriteBlobMSBShort(image,ZipWithoutPrediction);
#endif
  else
    length=WriteBlobMSBShort(image,Raw);
  return(length);
}

static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type,unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  int
    y;

  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse; quantum_info = AcquireQuantumInfo(image_info, next_image); if (quantum_info == (QuantumInfo *) NULL) return (0); pixels = (unsigned char *)GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels = (unsigned char *)AcquireQuantumMemory(CHUNK, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *)NULL) { quantum_info = DestroyQuantumInfo(quantum_info); return (0); } ResetMagickMemory(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; level = Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level = (int)image_info->quality; if (deflateInit(&stream, level) != Z_OK) { quantum_info = DestroyQuantumInfo(quantum_info); return (0); } } #endif for (y = 0; y < (ssize_t) next_image->rows; y++) { p = GetVirtualPixels(next_image, 0, y, next_image->columns, 1, exception); if (p == (const Quantum *)NULL) break; length = ExportQuantumPixels(next_image, (CacheView *) NULL, quantum_info, quantum_type, pixels, exception); if (monochrome != MagickFalse) for (i = 0; i < (ssize_t) length; i++) pixels[i] = (~pixels[i]); if (next_image->compression == RLECompression) { length = PSDPackbitsEncodeImage(image, length, pixels, compact_pixels, exception); count += WriteBlob(image, length, compact_pixels); size_offset += WritePSDOffset(psd_info, image, length, size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in = (uInt) length; stream.next_in = (Bytef *) pixels; if (y == (ssize_t) next_image->rows - 1) flush = Z_FINISH; do { stream.avail_out = (uInt) CHUNK; stream.next_out = (Bytef *) compressed_pixels; if (deflate(&stream, flush) == Z_STREAM_ERROR) break; length = (size_t) CHUNK - stream.avail_out; if (length > 0) count += WriteBlob(image, length, compressed_pixels); } while (stream.avail_out == 0); } #endif else count += WriteBlob(image, length, pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { (void)deflateEnd(&stream); compressed_pixels = (unsigned char *)RelinquishMagickMemory( compressed_pixels); } #endif quantum_info = DestroyQuantumInfo(quantum_info); return (count); } static unsigned char * AcquireCompactPixels(const Image * image, ExceptionInfo * exception) { size_t packet_size; unsigned char *compact_pixels; packet_size = image->depth > 8UL ? 2UL : 1UL; compact_pixels = (unsigned char *)AcquireQuantumMemory((9 * image->columns) + 1, packet_size * sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); } return (compact_pixels); } static size_t WritePSDChannels(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, MagickOffsetType size_offset, const MagickBooleanType separate, ExceptionInfo * exception) { Image * mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count = 0; offset_length = 0; rows_offset = 0; compact_pixels = (unsigned char *)NULL; if (next_image->compression == RLECompression) { compact_pixels = AcquireCompactPixels(next_image, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } channels = 1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsImageGray(next_image) == MagickFalse) channels = next_image->colorspace == CMYKColorspace ? 
4 : 3; if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, channels); offset_length = (next_image->rows * (psd_info->version == 1 ? 2 : 4)); } size_offset += 2; if (next_image->storage_class == PseudoClass) { length = WritePSDChannel(psd_info, image_info, image, next_image, IndexQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (IsImageGray(next_image) != MagickFalse) { length = WritePSDChannel(psd_info, image_info, image, next_image, GrayQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); length = WritePSDChannel(psd_info, image_info, image, next_image, RedQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, GreenQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, BlueQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; if (next_image->colorspace == CMYKColorspace) { length = WritePSDChannel(psd_info, image_info, image, next_image, BlackQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length = WritePSDChannel(psd_info, image_info, image, next_image, AlphaQuantum, compact_pixels, rows_offset, separate, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); if (separate != MagickFalse) { const char *property; property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels = AcquireCompactPixels(mask, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } length = WritePSDChannel(psd_info, image_info, image, mask, RedQuantum, compact_pixels, rows_offset, MagickTrue, exception); (void)WritePSDSize(psd_info, image, length, size_offset); count += length; compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); } } } return (count); } static size_t WritePascalString(Image * image, const char *value, size_t padding) { size_t count, length; register ssize_t 
i; /* * Max length is 255. */ count = 0; length = (strlen(value) > 255UL) ? 255UL : strlen(value); if (length == 0) count += WriteBlobByte(image, 0); else { count += WriteBlobByte(image, (unsigned char)length); count += WriteBlob(image, length, (const unsigned char *)value); } length++; if ((length % padding) == 0) return (count); for (i = 0; i < (ssize_t) (padding - (length % padding)); i++) count += WriteBlobByte(image, 0); return (count); } static void WriteResolutionResourceBlock(Image * image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution = 2.54 * 65536.0 * image->resolution.x + 0.5; y_resolution = 2.54 * 65536.0 * image->resolution.y + 0.5; units = 2; } else { x_resolution = 65536.0 * image->resolution.x + 0.5; y_resolution = 65536.0 * image->resolution.y + 0.5; units = 1; } (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x03ED); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, 16); /* resource size */ (void)WriteBlobMSBLong(image, (unsigned int)(x_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* horizontal resolution unit */ (void)WriteBlobMSBShort(image, units); /* width unit */ (void)WriteBlobMSBLong(image, (unsigned int)(y_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* vertical resolution unit */ (void)WriteBlobMSBShort(image, units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo * psd_info, Image * image, const signed short channel) { size_t count; count = WriteBlobMSBSignedShort(image, channel); count += SetPSDSize(psd_info, image, 0); return (count); } static void RemoveICCProfileFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < (datum + length - 16));) { register unsigned char *q; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); if (id == 0x0000040f) { ssize_t quantum; quantum = PSDQuantum(count) + 12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q + quantum < (datum + length - 16))) (void)CopyMagickMemory(q, q + quantum, length - quantum - (q - datum)); SetStringInfoLength(bim_profile, length - quantum); } break; } p += count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < (datum + length - 16));) { register unsigned char *q; ssize_t cnt; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) return; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); cnt = PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length - 12))) { (void)CopyMagickMemory(q, q + cnt + 12, length - (cnt + 12) - (q - 
datum)); SetStringInfoLength(bim_profile, length - (cnt + 12)); break; } p += count; if ((count & 0x01) != 0) p++; } } static const StringInfo * GetAdditionalInformation(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* * Whitelist of keys from: * https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo * info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo * profile; unsigned char *p; unsigned int size; info = GetImageProfile(image, "psd:additional-info"); if (info == (const StringInfo *)NULL) return ((const StringInfo *)NULL); option = GetImageOption(image_info, "psd:additional-info"); if (LocaleCompare(option, "all") == 0) return (info); if (LocaleCompare(option, "selective") != 0) { profile = RemoveImageProfile(image, "psd:additional-info"); return (DestroyStringInfo(profile)); } length = GetStringInfoLength(info); p = GetStringInfoDatum(info); remaining_length = length; length = 0; while (remaining_length >= 12) { /* skip over signature */ p += 4; key[0] = (*p++); key[1] = (*p++); key[2] = (*p++); key[3] = (*p++); key[4] = '\0'; size = (unsigned int)(*p++) << 24; size |= (unsigned int)(*p++) << 16; size |= (unsigned int)(*p++) << 8; size |= (unsigned int)(*p++); size = size & 0xffffffff; remaining_length -= 12; if ((size_t) size > remaining_length) return ((const StringInfo *)NULL); found = MagickFalse; for (i = 0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key, allowed[i], PSDKeySize) != 0) continue; found = MagickTrue; break; } remaining_length -= (size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p = (unsigned char *)CopyMagickMemory(p - 12, p + size, remaining_length); continue; } length += (size_t) size + 12; p += size; } profile = RemoveImageProfile(image, "psd:additional-info"); if (length == 0) return (DestroyStringInfo(profile)); SetStringInfoLength(profile, (const size_t)length); SetImageProfile(image, "psd:additional-info", info, exception); return (profile); } static MagickBooleanType WritePSDImage(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo * icc_profile, *info; Image * base_image, *next_image; MagickBooleanType status; MagickOffsetType * layer_size_offsets, size_offset; PSDInfo psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo * bim_profile; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status = OpenBlob(image_info, image, WriteBinaryBlobMode, exception); if (status == MagickFalse) return (status); packet_size = (size_t) (image->depth > 8 ? 
6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size += image->depth > 8 ? 2 : 1; psd_info.version = 1; if ((LocaleCompare(image_info->magick, "PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version = 2; (void)WriteBlob(image, 4, (const unsigned char *)"8BPS"); (void)WriteBlobMSBShort(image, psd_info.version); /* version */ for (i = 1; i <= 6; i++) (void)WriteBlobByte(image, 0); /* 6 bytes of reserved */ if (SetImageGray(image, exception) != MagickFalse) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void)SetImageStorageClass(image, DirectClass, exception); if (image->colorspace != CMYKColorspace) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels = (image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void)WriteBlobMSBShort(image, (unsigned short)num_channels); (void)WriteBlobMSBLong(image, (unsigned int)image->rows); (void)WriteBlobMSBLong(image, (unsigned int)image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* * Write depth & mode. */ monochrome = IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void)WriteBlobMSBShort(image, (unsigned short)(image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void)TransformImageColorspace(image, sRGBColorspace, exception); (void)WriteBlobMSBShort(image, (unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void)TransformImageColorspace(image, CMYKColorspace, exception); (void)WriteBlobMSBShort(image, CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void)WriteBlobMSBLong(image, 0); else { /* * Write PSD raster colormap. */ (void)WriteBlobMSBLong(image, 768); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(image->colormap[i].red)); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar( image->colormap[i].green)); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(image->colormap[i].blue)); for (; i < 256; i++) (void)WriteBlobByte(image, 0); } /* * Image resource block. 
*/ length = 28; /* 0x03EB */ bim_profile = (StringInfo *) GetImageProfile(image, "8bim"); icc_profile = GetImageProfile(image, "icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile = CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length += PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *)NULL) length += PSDQuantum(GetStringInfoLength(icc_profile)) + 12; (void)WriteBlobMSBLong(image, (unsigned int)length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void)WriteBlob(image, GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile = DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x0000040F); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, (unsigned int)GetStringInfoLength( icc_profile)); (void)WriteBlob(image, GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((MagickOffsetType) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void)WriteBlobByte(image, 0); } base_image = GetNextImageInList(image); if (base_image == (Image *) NULL) base_image = image; size = 0; size_offset = TellBlob(image); SetPSDSize(&psd_info, image, 0); SetPSDSize(&psd_info, image, 0); layer_count = 0; for (next_image = base_image; next_image != NULL;) { layer_count++; next_image = GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size += WriteBlobMSBShort(image, -(unsigned short)layer_count); else size += WriteBlobMSBShort(image, (unsigned short)layer_count); layer_size_offsets = (MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count, sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); layer_index = 0; for (next_image = base_image; next_image != NULL;) { Image * mask; unsigned char default_color; unsigned short channels, total_channels; mask = (Image *) NULL; property = GetImageArtifact(next_image, "psd:opacity-mask"); default_color = 0; if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); default_color = strlen(property) == 9 ? 255 : 0; } size += WriteBlobMSBLong(image, (unsigned int)next_image->page.y); size += WriteBlobMSBLong(image, (unsigned int)next_image->page.x); size += WriteBlobMSBLong(image, (unsigned int)(next_image->page.y + next_image->rows)); size += WriteBlobMSBLong(image, (unsigned int)(next_image->page.x + next_image->columns)); channels = 1U; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels = next_image->colorspace == CMYKColorspace ? 
4U : 3U; total_channels = channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size += WriteBlobMSBShort(image, total_channels); layer_size_offsets[layer_index++] = TellBlob(image); for (i = 0; i < (ssize_t) channels; i++) size += WriteChannelSize(&psd_info, image, (signed short)i); if (next_image->alpha_trait != UndefinedPixelTrait) size += WriteChannelSize(&psd_info, image, -1); if (mask != (Image *) NULL) size += WriteChannelSize(&psd_info, image, -2); size += WriteBlob(image, 4, (const unsigned char *)"8BIM"); size += WriteBlob(image, 4, (const unsigned char *) CompositeOperatorToPSDBlendMode(next_image->compose)); property = GetImageArtifact(next_image, "psd:layer.opacity"); if (property != (const char *)NULL) { Quantum opacity; opacity = (Quantum) StringToInteger(property); size += WriteBlobByte(image, ScaleQuantumToChar(opacity)); (void)ApplyPSDLayerOpacity(next_image, opacity, MagickTrue, exception); } else size += WriteBlobByte(image, 255); size += WriteBlobByte(image, 0); size += WriteBlobByte(image, next_image->compose == NoCompositeOp ? 1 << 0x02 : 1); /* layer properties - * visible, etc. */ size += WriteBlobByte(image, 0); info = GetAdditionalInformation(image_info, next_image, exception); property = (const char *)GetImageProperty(next_image, "label", exception); if (property == (const char *)NULL) { (void)FormatLocaleString(layer_name, MagickPathExtent, "L%.20g", (double)layer_index); property = layer_name; } name_length = strlen(property) + 1; if ((name_length % 4) != 0) name_length += (4 - (name_length % 4)); if (info != (const StringInfo *)NULL) name_length += GetStringInfoLength(info); name_length += 8; if (mask != (Image *) NULL) name_length += 20; size += WriteBlobMSBLong(image, (unsigned int)name_length); if (mask == (Image *) NULL) size += WriteBlobMSBLong(image, 0); else { if (mask->compose != NoCompositeOp) (void)ApplyPSDOpacityMask(next_image, mask, ScaleCharToQuantum( default_color), MagickTrue, exception); mask->page.y += image->page.y; mask->page.x += image->page.x; size += WriteBlobMSBLong(image, 20); size += WriteBlobMSBSignedLong(image, mask->page.y); size += WriteBlobMSBSignedLong(image, mask->page.x); size += WriteBlobMSBLong(image, (const unsigned int)mask->rows + mask->page.y); size += WriteBlobMSBLong(image, (const unsigned int)mask->columns + mask->page.x); size += WriteBlobByte(image, default_color); size += WriteBlobByte(image, mask->compose == NoCompositeOp ? 2 : 0); size += WriteBlobMSBShort(image, 0); } size += WriteBlobMSBLong(image, 0); size += WritePascalString(image, property, 4); if (info != (const StringInfo *)NULL) size += WriteBlob(image, GetStringInfoLength(info), GetStringInfoDatum(info)); next_image = GetNextImageInList(next_image); } /* * Now the image data! */ next_image = base_image; layer_index = 0; while (next_image != NULL) { length = WritePSDChannels(&psd_info, image_info, image, next_image, layer_size_offsets[layer_index++], MagickTrue, exception); if (length == 0) { status = MagickFalse; break; } size += length; next_image = GetNextImageInList(next_image); } (void)WriteBlobMSBLong(image, 0); /* user mask data */ /* * Write the total size */ size_offset += WritePSDSize(&psd_info, image, size + (psd_info.version == 1 ? 
8 : 16), size_offset); if ((size / 2) != ((size + 1) / 2)) rounded_size = size + 1; else rounded_size = size; (void)WritePSDSize(&psd_info, image, rounded_size, size_offset); layer_size_offsets = (MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* * Remove the opacity mask from the registry */ next_image = base_image; while (next_image != (Image *) NULL) { property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) DeleteImageRegistry(property); next_image = GetNextImageInList(next_image); } /* * Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression = image->compression; if (image->compression == ZipCompression) image->compression = RLECompression; if (WritePSDChannels(&psd_info, image_info, image, image, 0, MagickFalse, exception) == 0) status = MagickFalse; image->compression = compression; } (void)CloseBlob(image); return (status); }
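/*
  Aside: ReadPSDChannelZip() above undoes Photoshop's "zip with prediction"
  filter after inflating: within every row, each sample is stored as a delta
  from its left neighbour (the 16-bit branch performs the same addition with
  a carry across the big-endian byte pair).  The function below is a minimal
  sketch of the 8-bit case with a hypothetical name, not part of this coder.
*/
static void UndoZipPrediction8Sketch(unsigned char *row,const size_t columns)
{
  size_t
    x;

  /* turn horizontal deltas back into absolute sample values */
  for (x=1; x < columns; x++)
    row[x]=(unsigned char) (row[x]+row[x-1]);
}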
DenseMatrix.h
/* * DenseMatrix.h * * Created on: Nov 25, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * @ingroup algebraic * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std::vector<double> entries; double zero; public: /** Default constructor */ DenseMatrix(); /** * Constructs the DenseMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, double zero = 0.0); /** * Constructs the DenseMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, double zero = 0.0); /** * Constructs the @a dimension x @a dimension DenseMatrix from the elements given in @a triplets. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs the @a nRows x @a nCols DenseMatrix from the elements given in @a triplets. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @param zero The zero element (default is 0.0). * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std::vector<double>& entries, double zero = 0.0); /** Default destructor */ virtual ~DenseMatrix() = default; /** Default copy constructor */ DenseMatrix (const DenseMatrix &other) = default; /** Default move constructor */ DenseMatrix (DenseMatrix &&other) = default; /** Default move assignment operator */ DenseMatrix& operator=(DenseMatrix &&other) = default; /** Default copy assignment operator */ DenseMatrix& operator=(const DenseMatrix &other) = default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. * @note This function is linear in the number of columns of the matrix. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. * @note This function takes nRows * nCols operations. */ count nnz() const; /** * @return Value at matrix position (i,j). 
*/ double operator()(const index i, const index j) const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator+(const DenseMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix& operator+=(const DenseMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. */ DenseMatrix operator-(const DenseMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix& operator-=(const DenseMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. */ DenseMatrix operator*(const DenseMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix& operator/=(const double &divisor); /** * Transposes this matrix and returns it. */ DenseMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ DenseMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @a rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const DenseMatrix& source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. 
* @param unaryElementFunction */ template<typename F> void apply(const F unaryElementFunction); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix &matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix &LU, const Vector &b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template<typename L> static DenseMatrix binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp); /** * Iterate over all elements of row @a row in the matrix and call handler(index column, double value). */ template<typename L> void forElementsInRow(index row, L handle) const; /** * Iterate in parallel over all elements of row @a row in the matrix and call handler(index column, double value). */ template<typename L> void parallelForElementsInRow(index row, L handle) const; /** * Iterate over all elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on the elements of the matrix. */ template<typename L> void parallelForElementsInRowOrder(L handle) const; /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfColumns()) time regardless of the number of * non-zeros actually present. */ template<typename L> void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfColumns()) sequential time regardless of the number * of non-zeros actually present. */ template<typename L> void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) time regardless of the * number of non-zeros actually present. */ template<typename L> void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) sequential time regardless * of the number of non-zeros actually present. 
*/ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; }; template<typename F> void DenseMatrix::apply(const F unaryElementFunction) { #pragma omp parallel for for (index k = 0; k < entries.size(); ++k) { entries[k] = unaryElementFunction(entries[k]); } } template<typename L> inline DenseMatrix NetworKit::DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns(), 0.0); #pragma omp parallel for for (index i = 0; i < A.numberOfRows(); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); #pragma omp parallel for for (index j = 0; j < numberOfColumns(); ++j) { handle(j, entries[offset + j]); } } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRow(index row, L handle) const { for (index j = 0, k = row * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRow(index row, L handle) const { #pragma omp parallel for for (index j = 0; j < numberOfColumns(); ++j) { index k = row * numberOfColumns() + j; if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i,j,entries[k]); } } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i,j,entries[k]); } } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
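A minimal usage sketch of the LU interface declared above, assuming a NetworKit build in which Triplet is the {row, column, value} struct from AlgebraicGlobals.h and Vector offers a (dimension, initialValue) constructor with indexed access:

#include "DenseMatrix.h"

using namespace NetworKit;

int main() {
    // Solve A * x = b for A = [4 1; 1 3], b = (1, 2).
    std::vector<Triplet> triplets = {{0, 0, 4.0}, {0, 1, 1.0},
                                     {1, 0, 1.0}, {1, 1, 3.0}};
    DenseMatrix A(2, triplets);            // 2x2 matrix built from its nonzeros
    Vector b(2, 0.0);
    b[0] = 1.0;
    b[1] = 2.0;
    DenseMatrix::LUDecomposition(A);       // factorize in place
    Vector x = DenseMatrix::LUSolve(A, b); // back-substitute using the LU factors
    return 0;
}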
/* * DenseMatrix.h * * Created on: Nov 25, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * @ingroup algebraic * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std::vector<double> entries; double zero; public: /** Default constructor */ DenseMatrix(); /** * Constructs the DenseMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, double zero = 0.0); /** * Constructs the DenseMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, double zero = 0.0); /** * Constructs the @a dimension x @a dimension DenseMatrix from the elements given in @a triplets. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs the @a nRows x @a nCols DenseMatrix from the elements given in @a triplets. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @param zero The zero element (default is 0.0). * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std::vector<double>& entries, double zero = 0.0); /** Default destructor */ virtual ~DenseMatrix() = default; /** Default copy constructor */ DenseMatrix(const DenseMatrix &other) = default; /** Default move constructor */ DenseMatrix(DenseMatrix &&other) = default; /** Default move assignment operator */ DenseMatrix& operator=(DenseMatrix &&other) = default; /** Default copy assignment operator */ DenseMatrix& operator=(const DenseMatrix &other) = default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. * @note This function is linear in the number of columns of the matrix. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. * @note This function takes nRows * nCols operations. 
*/ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator() (const index i, const index j)const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i)const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j)const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator + (const DenseMatrix & other)const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix & operator += (const DenseMatrix & other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ DenseMatrix operator - (const DenseMatrix & other)const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix & operator -= (const DenseMatrix & other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator *(const double &scalar)const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix & operator *= (const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator *(const Vector & vector)const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. */ DenseMatrix operator *(const DenseMatrix & other)const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator / (const double &divisor)const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix & operator /= (const double &divisor); /** * Transposes this matrix and returns it. */ DenseMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ DenseMatrix extract(const std::vector < index > &rowIndices, const std::vector < index > &columnIndices)const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector < index > &rowIndices, const std::vector < index > &columnIndices, const DenseMatrix & source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. 
* @param unaryElementFunction */ template < typename F > void apply(const F unaryElementFunction); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix & matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix & LU, const Vector & b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template < typename L > static DenseMatrix binaryOperator(const DenseMatrix & A, const DenseMatrix & B, L binaryOp); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void forElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void parallelForElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template < typename L > void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template < typename L > void parallelForElementsInRowOrder(L handle) const; /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) time regardless of the number of * non-zeros actually present. */ template < typename L > void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) sequential time regardless of the number * of non-zeros actually present. */ template < typename L > void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) time regardless of the * number of non-zeros actually present. */ template < typename L > void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) sequential time regardless * of the number of non-zeros actually present. 
*/ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; }; template<typename F> void DenseMatrix::apply(const F unaryElementFunction) { for (index k = 0; k < entries.size(); ++k) { entries[k] = unaryElementFunction(entries[k]); } } template<typename L> inline DenseMatrix NetworKit::DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns(), 0.0); for (index i = 0; i < A.numberOfRows(); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index j = 0; j < numberOfColumns(); ++j) { handle(j, entries[offset + j]); } } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRow(index row, L handle) const { for (index j = 0, k = row * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRow(index row, L handle) const { for (index j = 0; j < numberOfColumns(); ++j) { index k = row * numberOfColumns() + j; if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i, j, entries[k]); } } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i, j, entries[k]); } } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
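A sketch of the handler-based traversal declared above: rowSums is an illustrative free function (not part of the class) that accumulates each row of a matrix, assuming Vector supports indexed assignment:

NetworKit::Vector rowSums(const NetworKit::DenseMatrix &M) {
    NetworKit::Vector sums(M.numberOfRows(), 0.0);
    // The handler receives (row, column, value) for every entry, zero or not.
    M.forElementsInRowOrder([&](NetworKit::index i, NetworKit::index j, double value) {
        (void) j; // column index unused here
        sums[i] += value;
    });
    return sums;
}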
/* * DenseMatrix.h * * Created on: Nov 25, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * @ingroup algebraic * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std::vector<double> entries; double zero; public: /** Default constructor */ DenseMatrix(); /** * Constructs the DenseMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, double zero = 0.0); /** * Constructs the DenseMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, double zero = 0.0); /** * Constructs the @a dimension x @a dimension DenseMatrix from the elements given in @a triplets. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs the @a nRows x @a nCols DenseMatrix from the elements given in @a triplets. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @param zero The zero element (default is 0.0). * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std::vector<double>& entries, double zero = 0.0); /** Default destructor */ virtual ~DenseMatrix() = default; /** Default copy constructor */ DenseMatrix(const DenseMatrix &other) = default; /** Default move constructor */ DenseMatrix(DenseMatrix &&other) = default; /** Default move assignment operator */ DenseMatrix& operator=(DenseMatrix &&other) = default; /** Default copy assignment operator */ DenseMatrix& operator=(const DenseMatrix &other) = default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. * @note This function is linear in the number of columns of the matrix. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. * @note This function takes nRows * nCols operations. 
*/ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator() (const index i, const index j)const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i)const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j)const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator + (const DenseMatrix & other)const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix & operator += (const DenseMatrix & other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ DenseMatrix operator - (const DenseMatrix & other)const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix & operator -= (const DenseMatrix & other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator *(const double &scalar)const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix & operator *= (const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator *(const Vector & vector)const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. */ DenseMatrix operator *(const DenseMatrix & other)const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator / (const double &divisor)const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix & operator /= (const double &divisor); /** * Transposes this matrix and returns it. */ DenseMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ DenseMatrix extract(const std::vector < index > &rowIndices, const std::vector < index > &columnIndices)const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector < index > &rowIndices, const std::vector < index > &columnIndices, const DenseMatrix & source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element. 
* @param unaryElementFunction */ template < typename F > void apply(const F unaryElementFunction); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix & matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix & LU, const Vector & b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template < typename L > static DenseMatrix binaryOperator(const DenseMatrix & A, const DenseMatrix & B, L binaryOp); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void forElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template < typename L > void parallelForElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template < typename L > void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template < typename L > void parallelForElementsInRowOrder(L handle) const; /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) time regardless of the number of * non-zeros actually present. */ template < typename L > void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) sequential time regardless of the number * of non-zeros actually present. */ template < typename L > void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) time regardless of the * number of non-zeros actually present. */ template < typename L > void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) sequential time regardless * of the number of non-zeros actually present. 
*/ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; }; template<typename F> void DenseMatrix::apply(const F unaryElementFunction) { #pragma omp parallel for for (index k = 0; k < entries.size(); ++k) { entries[k] = unaryElementFunction(entries[k]); } } template<typename L> inline DenseMatrix NetworKit::DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns(), 0.0); #pragma omp parallel for for (index i = 0; i < A.numberOfRows(); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); #pragma omp parallel for for (index j = 0; j < numberOfColumns(); ++j) { handle(j, entries[offset + j]); } } template<typename L> inline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRow(index row, L handle) const { for (index j = 0, k = row * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRow(index row, L handle) const { #pragma omp parallel for for (index j = 0; j < numberOfColumns(); ++j) { index k = row * numberOfColumns() + j; if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i, j, entries[k]); } } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i, j, entries[k]); } } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
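The parallelFor* bodies above invoke the handler concurrently from an OpenMP loop, so a handler must not race on shared state. A sketch of a safe alternative for accumulation (frobeniusNormSquared is a hypothetical helper, not part of the class), using an OpenMP reduction instead of a shared accumulator inside the handler:

double frobeniusNormSquared(const NetworKit::DenseMatrix &M) {
    double total = 0.0;
    #pragma omp parallel for reduction(+ : total)
    for (NetworKit::index i = 0; i < M.numberOfRows(); ++i) {
        for (NetworKit::index j = 0; j < M.numberOfColumns(); ++j) {
            total += M(i, j) * M(i, j); // operator() reads entry (i, j)
        }
    }
    return total;
}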
common.c
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.1 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2015. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include <xray.h> #include "common.h" #define CONF95 1.96 int nthreads = -1; // Number of OpenMP threads int delaylength = -1; // The number of iterations to delay for int outerreps = -1; // Outer repetitions double delaytime = -1.0; // Length of time to delay for in microseconds double targettesttime = 0.0; // The length of time in microseconds that the test // should run for. unsigned long innerreps; // Inner repetitions double *times; // Array of doubles storing the benchmark times in microseconds double referencetime; // The average reference time in microseconds to perform // outerreps runs double referencesd; // The standard deviation in the reference time in // microseconds for outerreps runs. double testtime; // The average test time in microseconds for // outerreps runs double testsd; // The standard deviation in the test time in // microseconds for outerreps runs. 
void usage(char *argv[]) { printf("Usage: %s.x \n" "\t--outer-repetitions <outer-repetitions> (default %d)\n" "\t--test-time <target-test-time> (default %0.2f microseconds)\n" "\t--delay-time <delay-time> (default %0.4f microseconds)\n" "\t--delay-length <delay-length> " "(default auto-generated based on processor speed)\n", argv[0], DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME); } void parse_args(int argc, char *argv[]) { // Parse the parameters int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--delay-time") == 0) { delaytime = atof(argv[++arg]); if (delaytime == 0.0) { printf("Invalid float:--delay-time: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--outer-repetitions") == 0) { outerreps = atoi(argv[++arg]); if (outerreps == 0) { printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--test-time") == 0) { targettesttime = atof(argv[++arg]); if (targettesttime == 0) { printf("Invalid float:--test-time: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } int getdelaylengthfromtime(double delaytime) { int i, reps; double lapsedtime, starttime; // seconds reps = 1000; lapsedtime = 0.0; delaytime = delaytime/1.0E6; // convert from microseconds to seconds // Note: delaytime is local to this function and thus the conversion // does not propagate to the main code. // Here we want to use the delaytime in microseconds to find the // delaylength in iterations. We start with delaylength=0 and // increase until we get a large enough delaytime, return delaylength // in iterations. delaylength = 0; delay(delaylength); while (lapsedtime < delaytime) { delaylength = delaylength * 1.1 + 1; starttime = getclock(); for (i = 0; i < reps; i++) { delay(delaylength); } lapsedtime = (getclock() - starttime) / (double) reps; } return delaylength; } unsigned long getinnerreps(void (*test)(void)) { innerreps = 10L; // some initial value double time = 0.0; while (time < targettesttime) { double start = getclock(); test(); time = (getclock() - start) * 1.0e6; innerreps *= 2; // Test to stop code if compiler is optimising reference time expressions away if (innerreps > (targettesttime*1.0e15)) { printf("Compiler has optimised reference loop away, STOP!\n"); printf("Try recompiling with lower optimisation level \n"); exit(1); } } return innerreps; } void printheader(char *name) { printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing %s time using %lu reps\n", name, innerreps); } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff; int i, nr; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; for (i = 1; i <= outerreps; i++) { mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; } meantime = totaltime / outerreps; sumsq = 0; for (i = 1; i <= outerreps; i++) { sumsq += (times[i] - meantime) * (times[i] - meantime); } sd = sqrt(sumsq / (outerreps - 1)); cutoff = 3.0 * sd; nr = 0; for (i = 1; i <= outerreps; i++) { if (fabs(times[i] - meantime) > cutoff) nr++; } printf("\n"); printf("Sample_size Average Min Max S.D. Outliers\n"); printf(" %d %f %f %f %f %d\n", outerreps, meantime, mintime, maxtime, sd, nr); printf("\n"); *mtp = meantime; *sdp = sd; } void printfooter(char *name, double testtime, double testsd, double referencetime, double refsd) { printf("%s time = %f microseconds +/- %f\n", name, testtime, CONF95*testsd); printf("%s overhead = %f microseconds +/- %f\n", name, testtime-referencetime, CONF95*(testsd+refsd)); } void printreferencefooter(char *name, double referencetime, double referencesd) { printf("%s time = %f microseconds +/- %f\n", name, referencetime, CONF95 * referencesd); } void init(int argc, char **argv) { #pragma omp parallel { #pragma omp master { nthreads = omp_get_num_threads(); } } parse_args(argc, argv); if (outerreps == -1) { outerreps = DEFAULT_OUTER_REPS; } if (targettesttime == 0.0) { targettesttime = DEFAULT_TEST_TARGET_TIME; } if (delaytime == -1.0) { delaytime = DEFAULT_DELAY_TIME; } delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations times = malloc((outerreps+1) * sizeof(double)); printf("Running OpenMP benchmark version 3.0\n" "\t%d thread(s)\n" "\t%d outer repetitions\n" "\t%0.2f test time (microseconds)\n" "\t%d delay length (iterations) \n" "\t%f delay time (microseconds)\n", nthreads, outerreps, targettesttime, delaylength, delaytime); } void finalise(void) { free(times); } void initreference(char *name) { printheader(name); } /* Calculate the reference time. */ void reference(char *name, void (*refer)(void)) { int k; double start; XRayLabelFrame(name); // Calculate the required number of innerreps innerreps = getinnerreps(refer); initreference(name); for (k = 0; k <= outerreps; k++) { start = getclock(); refer(); times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } finalisereference(name); } void finalisereference(char *name) { stats(&referencetime, &referencesd); printreferencefooter(name, referencetime, referencesd); } void intitest(char *name) { printheader(name); } void finalisetest(char *name) { stats(&testtime, &testsd); printfooter(name, testtime, testsd, referencetime, referencesd); } /* Function to run a microbenchmark test */ void benchmark(char *name, void (*test)(void)) { int k; double start; // Calculate the required number of innerreps innerreps = getinnerreps(test); intitest(name); XRayLabelFrame(name); for (k = 0; k <= outerreps; k++) { start = getclock(); test(); times[k] = (getclock() - start) * 1.0e6 / (double) innerreps; } finalisetest(name); } // For the Cray compiler on HECToR we need to turn off optimisation // for the delay and array_delay functions. Other compilers should // not be affected. #pragma _CRI noopt void delay(int delaylength) { int i; float a = 0.; for (i = 0; i < delaylength; i++) a += i; if (a < 0) printf("%f \n", a); } void array_delay(int delaylength, double a[1]) { int i; a[0] = 1.0; for (i = 0; i < delaylength; i++) a[0] += i; if (a[0] < 0) printf("%f \n", a[0]); } // Re-enable optimisation for remainder of source. #pragma _CRI opt double getclock() { double time; // Returns a value in seconds of the time elapsed from some arbitrary, // but consistent point. double omp_get_wtime(void); time = omp_get_wtime(); return time; } int returnfalse() { return 0; }
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include <xray.h> #include "common.h" #define CONF95 1.96 int nthreads = -1; //Number of OpenMP threads int delaylength = -1; //The number of iterations to delay for int outerreps = -1; //Outer repetitions double delaytime = -1.0; //Length of time to delay for in microseconds double targettesttime = 0.0; //The length of time in microseconds that the test // should run for. unsigned long innerreps; //Inner repetitions double *times; //Array of doubles storing the benchmark times in microseconds double referencetime; //The average reference time in microseconds to perform // outerreps runs double referencesd; //The standard deviation in the reference time in // microseconds for outerreps runs. double testtime; //The average test time in microseconds for //outerreps runs double testsd; //The standard deviation in the test time in // microseconds for outerreps runs. void usage(char *argv[]) { printf("Usage: %s.x \n" "\t--outer-repetitions <outer-repetitions> (default %d)\n" "\t--test-time <target-test-time> (default %0.2f microseconds)\n" "\t--delay-time <delay-time> (default %0.4f microseconds)\n" "\t--delay-length <delay-length> " "(default auto-generated based on processor speed)\n", argv[0], DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME); } void parse_args(int argc, char *argv[]) { //Parse the parameters int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--delay-time") == 0.0) { delaytime = atof(argv[++arg]); if (delaytime == 0.0) { printf("Invalid float:--delay-time: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--outer-repetitions") == 0) { outerreps = atoi(argv[++arg]); if (outerreps == 0) { printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--test-time") == 0) { targettesttime = atof(argv[++arg]); if (targettesttime == 0) { printf("Invalid integer:--test-time: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } int getdelaylengthfromtime(double delaytime) { int i, reps; double lapsedtime, starttime; //seconds reps = 1000; lapsedtime = 0.0; delaytime = delaytime / 1.0E6; //convert from microseconds to seconds // Note:delaytime is local to this function and thus the conversion // does not propagate to the main code. // Here we want to use the delaytime in microseconds to find the // delaylength in iterations.We start with delaylength = 0 and // increase until we get a large enough delaytime, return delaylength // in iterations. delaylength = 0; delay(delaylength); while (lapsedtime < delaytime) { delaylength = delaylength * 1.1 + 1; starttime = getclock(); for (i = 0; i < reps; i++) { delay(delaylength); } lapsedtime = (getclock() - starttime) / (double)reps; } return delaylength; } unsigned long getinnerreps(void (*test) (void)) { innerreps = 10L; //some initial value double time = 0.0; while (time < targettesttime) { double start = getclock(); test(); time = (getclock() - start) * 1.0e6; innerreps *= 2; //Test to stop code if compiler is optimising reference time expressions away if (innerreps > (targettesttime * 1.0e15)) { printf("Compiler has optimised reference loop away, STOP! 
\n"); printf("Try recompiling with lower optimisation level \n"); exit(1); } } return innerreps; } void printheader(char *name) { printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing %s time using %lu reps\n", name, innerreps); } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff; int i, nr; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; for (i = 1; i <= outerreps; i++) { mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; } meantime = totaltime / outerreps; sumsq = 0; for (i = 1; i <= outerreps; i++) { sumsq += (times[i] - meantime) * (times[i] - meantime); } sd = sqrt(sumsq / (outerreps - 1)); cutoff = 3.0 * sd; nr = 0; for (i = 1; i <= outerreps; i++) { if (fabs(times[i] - meantime) > cutoff) nr++; } printf("\n"); printf("Sample_size Average Min Max S.D. Outliers\n"); printf(" %d %f %f %f %f %d\n", outerreps, meantime, mintime, maxtime, sd, nr); printf("\n"); *mtp = meantime; *sdp = sd; } void printfooter(char *name, double testtime, double testsd, double referencetime, double refsd) { printf("%s time = %f microseconds +/- %f\n", name, testtime, CONF95 * testsd); printf("%s overhead = %f microseconds +/- %f\n", name, testtime - referencetime, CONF95 * (testsd + referencesd)); } void printreferencefooter(char *name, double referencetime, double referencesd) { printf("%s time = %f microseconds +/- %f\n", name, referencetime, CONF95 * referencesd); } void init(int argc, char **argv) { #pragma omp master { nthreads = omp_get_num_threads(); } parse_args(argc, argv); if (outerreps == -1) { outerreps = DEFAULT_OUTER_REPS; } if (targettesttime == 0.0) { targettesttime = DEFAULT_TEST_TARGET_TIME; } if (delaytime == -1.0) { delaytime = DEFAULT_DELAY_TIME; } delaylength = getdelaylengthfromtime(delaytime); //Always need to compute delaylength in iterations times = malloc((outerreps + 1) * sizeof(double)); printf("Running OpenMP benchmark version 3.0\n" "\t%d thread(s)\n" "\t%d outer repetitions\n" "\t%0.2f test time (microseconds)\n" "\t%d delay length (iterations) \n" "\t%f delay time (microseconds)\n", nthreads, outerreps, targettesttime, delaylength, delaytime); } void finalise(void) { free(times); } void initreference(char *name) { printheader(name); } /* Calculate the reference time. 
*/ void reference(char *name, void (*refer) (void)) { int k; double start; XRayLabelFrame(name); //Calculate the required number of innerreps innerreps = getinnerreps(refer); initreference(name); for (k = 0; k <= outerreps; k++) { start = getclock(); refer(); times[k] = (getclock() - start) * 1.0e6 / (double)innerreps; } finalisereference(name); } void finalisereference(char *name) { stats(&referencetime, &referencesd); printreferencefooter(name, referencetime, referencesd); } void intitest(char *name) { printheader(name); } void finalisetest(char *name) { stats(&testtime, &testsd); printfooter(name, testtime, testsd, referencetime, referencesd); } /* Function to run a microbenchmark test */ void benchmark(char *name, void (*test) (void)) { int k; double start; //Calculate the required number of innerreps innerreps = getinnerreps(test); intitest(name); XRayLabelFrame(name); for (k = 0; k <= outerreps; k++) { start = getclock(); test(); times[k] = (getclock() - start) * 1.0e6 / (double)innerreps; } finalisetest(name); } //For the Cray compiler on HECToR we need to turn off optimisation // for the delay and array_delay functions.Other compilers should // not be afffected. #pragma _CRI noopt void delay(int delaylength) { int i; float a = 0.; for (i = 0; i < delaylength; i++) a += i; if (a < 0) printf("%f \n", a); } void array_delay(int delaylength, double a[1]) { int i; a[0] = 1.0; for (i = 0; i < delaylength; i++) a[0] += i; if (a[0] < 0) printf("%f \n", a[0]); } //Re - enable optimisation for remainder of source. #pragma _CRI opt double getclock() { double time; //Returns a value in seconds of the time elapsed from some arbitrary, //but consistent point. double omp_get_wtime(void); time = omp_get_wtime(); return time; } int returnfalse() { return 0; }
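The calibration idea in getdelaylengthfromtime, restated as a sketch (calibrate and delayfn are illustrative names): grow the iteration count geometrically until the average measured delay reaches the target, which keeps the search logarithmic rather than linear in the final delay length:

#include <omp.h>

static int calibrate(double target_seconds, void (*delayfn)(int)) {
    int len = 0;
    double elapsed = 0.0;
    const int reps = 1000; // average over many calls, as the code above does
    while (elapsed < target_seconds) {
        len = (int)(len * 1.1) + 1; // geometric growth
        double start = omp_get_wtime();
        for (int i = 0; i < reps; ++i) delayfn(len);
        elapsed = (omp_get_wtime() - start) / reps;
    }
    return len;
}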
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <omp.h> #include <xray.h> #include "common.h" #define CONF95 1.96 int nthreads = -1; //Number of OpenMP threads int delaylength = -1; //The number of iterations to delay for int outerreps = -1; //Outer repetitions double delaytime = -1.0; //Length of time to delay for in microseconds double targettesttime = 0.0; //The length of time in microseconds that the test // should run for. unsigned long innerreps; //Inner repetitions double *times; //Array of doubles storing the benchmark times in microseconds double referencetime; //The average reference time in microseconds to perform // outerreps runs double referencesd; //The standard deviation in the reference time in // microseconds for outerreps runs. double testtime; //The average test time in microseconds for //outerreps runs double testsd; //The standard deviation in the test time in // microseconds for outerreps runs. void usage(char *argv[]) { printf("Usage: %s.x \n" "\t--outer-repetitions <outer-repetitions> (default %d)\n" "\t--test-time <target-test-time> (default %0.2f microseconds)\n" "\t--delay-time <delay-time> (default %0.4f microseconds)\n" "\t--delay-length <delay-length> " "(default auto-generated based on processor speed)\n", argv[0], DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME); } void parse_args(int argc, char *argv[]) { //Parse the parameters int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--delay-time") == 0.0) { delaytime = atof(argv[++arg]); if (delaytime == 0.0) { printf("Invalid float:--delay-time: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--outer-repetitions") == 0) { outerreps = atoi(argv[++arg]); if (outerreps == 0) { printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--test-time") == 0) { targettesttime = atof(argv[++arg]); if (targettesttime == 0) { printf("Invalid integer:--test-time: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } int getdelaylengthfromtime(double delaytime) { int i, reps; double lapsedtime, starttime; //seconds reps = 1000; lapsedtime = 0.0; delaytime = delaytime / 1.0E6; //convert from microseconds to seconds // Note:delaytime is local to this function and thus the conversion // does not propagate to the main code. // Here we want to use the delaytime in microseconds to find the // delaylength in iterations.We start with delaylength = 0 and // increase until we get a large enough delaytime, return delaylength // in iterations. delaylength = 0; delay(delaylength); while (lapsedtime < delaytime) { delaylength = delaylength * 1.1 + 1; starttime = getclock(); for (i = 0; i < reps; i++) { delay(delaylength); } lapsedtime = (getclock() - starttime) / (double)reps; } return delaylength; } unsigned long getinnerreps(void (*test) (void)) { innerreps = 10L; //some initial value double time = 0.0; while (time < targettesttime) { double start = getclock(); test(); time = (getclock() - start) * 1.0e6; innerreps *= 2; //Test to stop code if compiler is optimising reference time expressions away if (innerreps > (targettesttime * 1.0e15)) { printf("Compiler has optimised reference loop away, STOP! 
\n"); printf("Try recompiling with lower optimisation level \n"); exit(1); } } return innerreps; } void printheader(char *name) { printf("\n"); printf("--------------------------------------------------------\n"); printf("Computing %s time using %lu reps\n", name, innerreps); } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff; int i, nr; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; for (i = 1; i <= outerreps; i++) { mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; } meantime = totaltime / outerreps; sumsq = 0; for (i = 1; i <= outerreps; i++) { sumsq += (times[i] - meantime) * (times[i] - meantime); } sd = sqrt(sumsq / (outerreps - 1)); cutoff = 3.0 * sd; nr = 0; for (i = 1; i <= outerreps; i++) { if (fabs(times[i] - meantime) > cutoff) nr++; } printf("\n"); printf("Sample_size Average Min Max S.D. Outliers\n"); printf(" %d %f %f %f %f %d\n", outerreps, meantime, mintime, maxtime, sd, nr); printf("\n"); *mtp = meantime; *sdp = sd; } void printfooter(char *name, double testtime, double testsd, double referencetime, double refsd) { printf("%s time = %f microseconds +/- %f\n", name, testtime, CONF95 * testsd); printf("%s overhead = %f microseconds +/- %f\n", name, testtime - referencetime, CONF95 * (testsd + referencesd)); } void printreferencefooter(char *name, double referencetime, double referencesd) { printf("%s time = %f microseconds +/- %f\n", name, referencetime, CONF95 * referencesd); } void init(int argc, char **argv) { #pragma omp parallel { #pragma omp master { nthreads = omp_get_num_threads(); } } parse_args(argc, argv); if (outerreps == -1) { outerreps = DEFAULT_OUTER_REPS; } if (targettesttime == 0.0) { targettesttime = DEFAULT_TEST_TARGET_TIME; } if (delaytime == -1.0) { delaytime = DEFAULT_DELAY_TIME; } delaylength = getdelaylengthfromtime(delaytime); //Always need to compute delaylength in iterations times = malloc((outerreps + 1) * sizeof(double)); printf("Running OpenMP benchmark version 3.0\n" "\t%d thread(s)\n" "\t%d outer repetitions\n" "\t%0.2f test time (microseconds)\n" "\t%d delay length (iterations) \n" "\t%f delay time (microseconds)\n", nthreads, outerreps, targettesttime, delaylength, delaytime); } void finalise(void) { free(times); } void initreference(char *name) { printheader(name); } /* Calculate the reference time. 
*/ void reference(char *name, void (*refer) (void)) { int k; double start; XRayLabelFrame(name); //Calculate the required number of innerreps innerreps = getinnerreps(refer); initreference(name); for (k = 0; k <= outerreps; k++) { start = getclock(); refer(); times[k] = (getclock() - start) * 1.0e6 / (double)innerreps; } finalisereference(name); } void finalisereference(char *name) { stats(&referencetime, &referencesd); printreferencefooter(name, referencetime, referencesd); } void intitest(char *name) { printheader(name); } void finalisetest(char *name) { stats(&testtime, &testsd); printfooter(name, testtime, testsd, referencetime, referencesd); } /* Function to run a microbenchmark test */ void benchmark(char *name, void (*test) (void)) { int k; double start; //Calculate the required number of innerreps innerreps = getinnerreps(test); intitest(name); XRayLabelFrame(name); for (k = 0; k <= outerreps; k++) { start = getclock(); test(); times[k] = (getclock() - start) * 1.0e6 / (double)innerreps; } finalisetest(name); } //For the Cray compiler on HECToR we need to turn off optimisation // for the delay and array_delay functions. Other compilers should // not be affected. #pragma _CRI noopt void delay(int delaylength) { int i; float a = 0.; for (i = 0; i < delaylength; i++) a += i; if (a < 0) printf("%f \n", a); } void array_delay(int delaylength, double a[1]) { int i; a[0] = 1.0; for (i = 0; i < delaylength; i++) a[0] += i; if (a[0] < 0) printf("%f \n", a[0]); } //Re-enable optimisation for remainder of source. #pragma _CRI opt double getclock() { double time; //Returns a value in seconds of the time elapsed from some arbitrary, //but consistent point. double omp_get_wtime(void); time = omp_get_wtime(); return time; } int returnfalse() { return 0; }
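A minimal sketch of how this harness is driven, shaped like an EPCC-syncbench-style benchmark program. Everything below is illustrative rather than part of the harness: refer and testparallel are hypothetical callbacks, and the prototypes simply mirror the definitions above (normally they would come from common.h). The overhead printed by printfooter is the test mean minus the reference mean, with a CONF95 * (testsd + referencesd) uncertainty.

#include <omp.h>

/* prototypes mirroring the harness above (normally provided by common.h) */
void init(int argc, char **argv);
void finalise(void);
void reference(char *name, void (*refer) (void));
void benchmark(char *name, void (*test) (void));
void delay(int delaylength);
extern unsigned long innerreps;
extern int delaylength;

/* reference: the bare delay loop, no OpenMP construct around it */
static void refer(void) {
    for (unsigned long j = 0; j < innerreps; j++) {
        delay(delaylength);
    }
}

/* test: the same loop, paying for one PARALLEL region per iteration */
static void testparallel(void) {
    for (unsigned long j = 0; j < innerreps; j++) {
#pragma omp parallel
        delay(delaylength);
    }
}

int main(int argc, char **argv) {
    init(argc, argv);
    reference("reference time 1", &refer);
    benchmark("PARALLEL", &testparallel);
    finalise();
    return 0;
}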
parallelizer.h
// (C) Copyright Renaud Detry 2007-2015. // Distributed under the GNU General Public License and under the // BSD 3-Clause License (See accompanying file LICENSE.txt). /** @file */ #ifndef NUKLEI_PARALLELIZER_H #define NUKLEI_PARALLELIZER_H #include <nuklei/Random.h> #include <nuklei/Common.h> #include <nuklei/BoostSerialization.h> #include <nuklei/parallelizer_decl.h> #include <cstdlib> #include <boost/filesystem.hpp> #include <boost/asio.hpp> #include <boost/thread.hpp> namespace nuklei { template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_openmp(Callable callable, PrintAccessor pa) const { std::vector<R> retv; #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < n_; ++i) { R tmp = callable(); #ifdef _OPENMP #pragma omp critical(nuklei_parallelizer_merge) #endif { retv.push_back(tmp); NUKLEI_INFO("Finished OpenMP thread " << i << " with value " << pa(tmp) << "."); } } return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_fork(Callable callable, PrintAccessor pa) const { boost::filesystem::path endpoint_name = boost::filesystem::unique_path("/tmp/nuklei-%%%%-%%%%-%%%%-%%%%"); //std::vector<pid_t> pids(n_, 0); std::vector<R> retv; for (int i = 0; i < n_; i++) { pid_t pid = fork(); NUKLEI_ASSERT(pid >= 0); if (pid == 0) { Random::seed(seed_+i); // unsigned overflow wraps around. using boost::asio::local::stream_protocol; R tmp = callable(); { stream_protocol::endpoint ep(endpoint_name.native()); stream_protocol::iostream stream(ep); NUKLEI_SERIALIZATION_BINARY_OARCHIVE oa(stream); oa & i & NUKLEI_SERIALIZATION_NVP(tmp); } _exit(0); } else { //pids.at(i) = pid; } } using boost::asio::local::stream_protocol; stream_protocol::endpoint ep(endpoint_name.native()); boost::asio::io_service io_service; stream_protocol::acceptor acceptor(io_service, ep); for (int i = 0; i < n_; i++) { R tmp; int fork_i = 0; { stream_protocol::iostream stream; acceptor.accept(*stream.rdbuf()); NUKLEI_SERIALIZATION_BINARY_IARCHIVE ia(stream); ia & fork_i & NUKLEI_SERIALIZATION_NVP(tmp); } retv.push_back(tmp); NUKLEI_INFO("Finished fork " << fork_i << " with value " << pa(tmp) << "."); } // clean up: boost::filesystem::remove(endpoint_name); reap(); return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_pthread(Callable callable, PrintAccessor pa) const { std::vector<R> retv(n_); std::vector< boost::shared_ptr<boost::thread> > threads; for (int i = 0; i < n_; ++i) { // This does not work without the first boost::ref (even if bind<void> // is used. The solution is to parametrize run_pthread_stub with // boost::_bi::protected_bind_t<C>, and boost::protect(callable), // but _bi is not public. 
//boost::shared_ptr<boost::thread> thrd //(new boost::thread(boost::bind(run_pthread_stub<R, Callable>, // boost::ref(callable), // boost::ref(retv.at(i))))); // For future ref, here's the helper function: //template<typename R, typename Callable> //void run_pthread_stub(Callable callable, R& ret) //{ // ret = callable(); //} boost::shared_ptr<boost::thread> thread (new boost::thread (boost::bind<void>(pthread_wrapper<R, Callable>(callable), boost::ref(retv.at(i))))); threads.push_back(thread); } for (int i = 0; i < n_; ++i) { threads.at(i)->join(); NUKLEI_INFO("Finished thread " << i << " with value " << pa(retv.at(i)) << "."); } return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_single(Callable callable, PrintAccessor pa) const { std::vector<R> retv; for (int i = 0; i < n_; ++i) { R tmp = callable(); retv.push_back(tmp); NUKLEI_INFO("Finished slice " << i << " with value " << pa(tmp) << "."); } return retv; } } #endif
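run_fork above returns each child's result to the parent over a local stream socket with Boost serialization. A plain-C sketch of the same collection pattern, reduced to socketpair and a raw double in place of the archive layer (all names and the N = 4 slice count are illustrative, not Nuklei API): each forked child computes one value, writes it back, and _exits; the parent collects one value per child and reaps.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
    enum { N = 4 };
    double results[N];
    int fds[N][2];
    for (int i = 0; i < N; i++) {
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds[i]) != 0) return 1;
        pid_t pid = fork();
        if (pid < 0) return 1;
        if (pid == 0) {                       /* child: compute, send, _exit */
            close(fds[i][0]);
            double tmp = i * 10.0;            /* stand-in for callable() */
            if (write(fds[i][1], &tmp, sizeof tmp) != (ssize_t) sizeof tmp) _exit(1);
            _exit(0);
        }
        close(fds[i][1]);                     /* parent keeps the read end */
    }
    for (int i = 0; i < N; i++) {             /* collect one result per child */
        if (read(fds[i][0], &results[i], sizeof results[i]) != (ssize_t) sizeof results[i]) return 1;
        close(fds[i][0]);
    }
    while (wait(NULL) > 0) { }                /* reap children, like reap() */
    for (int i = 0; i < N; i++) printf("%f\n", results[i]);
    return 0;
}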
// (C) Copyright Renaud Detry 2007-2015. // Distributed under the GNU General Public License and under the // BSD 3-Clause License (See accompanying file LICENSE.txt). /** @file */ #ifndef NUKLEI_PARALLELIZER_H #define NUKLEI_PARALLELIZER_H #include <nuklei/Random.h> #include <nuklei/Common.h> #include <nuklei/BoostSerialization.h> #include <nuklei/parallelizer_decl.h> #include <cstdlib> #include <boost/filesystem.hpp> #include <boost/asio.hpp> #include <boost/thread.hpp> namespace nuklei { template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_openmp(Callable callable, PrintAccessor pa) const { std::vector<R> retv; for (int i = 0; i < n_; ++i) { R tmp = callable(); { retv.push_back(tmp); NUKLEI_INFO("Finished OpenMP thread " << i << " with value " << pa(tmp) << "."); } } return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_fork(Callable callable, PrintAccessor pa) const { boost::filesystem::path endpoint_name = boost::filesystem::unique_path("/tmp/nuklei-%%%%-%%%%-%%%%-%%%%"); //std::vector<pid_t> pids(n_, 0); std::vector<R> retv; for (int i = 0; i < n_; i++) { pid_t pid = fork(); NUKLEI_ASSERT(pid >= 0); if (pid == 0) { Random::seed(seed_+i); // unsigned overflow wraps around. using boost::asio::local::stream_protocol; R tmp = callable(); { stream_protocol::endpoint ep(endpoint_name.native()); stream_protocol::iostream stream(ep); NUKLEI_SERIALIZATION_BINARY_OARCHIVE oa(stream); oa & i & NUKLEI_SERIALIZATION_NVP(tmp); } _exit(0); } else { //pids.at(i) = pid; } } using boost::asio::local::stream_protocol; stream_protocol::endpoint ep(endpoint_name.native()); boost::asio::io_service io_service; stream_protocol::acceptor acceptor(io_service, ep); for (int i = 0; i < n_; i++) { R tmp; int fork_i = 0; { stream_protocol::iostream stream; acceptor.accept(*stream.rdbuf()); NUKLEI_SERIALIZATION_BINARY_IARCHIVE ia(stream); ia & fork_i & NUKLEI_SERIALIZATION_NVP(tmp); } retv.push_back(tmp); NUKLEI_INFO("Finished fork " << fork_i << " with value " << pa(tmp) << "."); } // clean up: boost::filesystem::remove(endpoint_name); reap(); return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_pthread(Callable callable, PrintAccessor pa) const { std::vector<R> retv(n_); std::vector< boost::shared_ptr<boost::thread> > threads; for (int i = 0; i < n_; ++i) { // This does not work without the first boost::ref (even if bind<void> // is used. The solution is to parametrize run_pthread_stub with // boost::_bi::protected_bind_t<C>, and boost::protect(callable), // but _bi is not public. 
//boost::shared_ptr<boost::thread> thrd //(new boost::thread(boost::bind(run_pthread_stub<R, Callable>, // boost::ref(callable), // boost::ref(retv.at(i))))); // For future ref, here's the helper function: //template<typename R, typename Callable> //void run_pthread_stub(Callable callable, R& ret) //{ // ret = callable(); //} boost::shared_ptr<boost::thread> thread (new boost::thread (boost::bind<void>(pthread_wrapper<R, Callable>(callable), boost::ref(retv.at(i))))); threads.push_back(thread); } for (int i = 0; i < n_; ++i) { threads.at(i)->join(); NUKLEI_INFO("Finished thread " << i << " with value " << pa(retv.at(i)) << "."); } return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_single(Callable callable, PrintAccessor pa) const { std::vector<R> retv; for (int i = 0; i < n_; ++i) { R tmp = callable(); retv.push_back(tmp); NUKLEI_INFO("Finished slice " << i << " with value " << pa(tmp) << "."); } return retv; } } #endif
// (C) Copyright Renaud Detry 2007-2015. // Distributed under the GNU General Public License and under the // BSD 3-Clause License (See accompanying file LICENSE.txt). /** @file */ #ifndef NUKLEI_PARALLELIZER_H #define NUKLEI_PARALLELIZER_H #include <nuklei/Random.h> #include <nuklei/Common.h> #include <nuklei/BoostSerialization.h> #include <nuklei/parallelizer_decl.h> #include <cstdlib> #include <boost/filesystem.hpp> #include <boost/asio.hpp> #include <boost/thread.hpp> namespace nuklei { template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_openmp(Callable callable, PrintAccessor pa) const { std::vector<R> retv; #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < n_; ++i) { R tmp = callable(); #ifdef _OPENMP #pragma omp critical(nuklei_parallelizer_merge) #endif { retv.push_back(tmp); NUKLEI_INFO("Finished OpenMP thread " << i << " with value " << pa(tmp) << "."); } } return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_fork(Callable callable, PrintAccessor pa) const { boost::filesystem::path endpoint_name = boost::filesystem::unique_path("/tmp/nuklei-%%%%-%%%%-%%%%-%%%%"); //std::vector<pid_t> pids(n_, 0); std::vector<R> retv; for (int i = 0; i < n_; i++) { pid_t pid = fork(); NUKLEI_ASSERT(pid >= 0); if (pid == 0) { Random::seed(seed_+i); // unsigned overflow wraps around. using boost::asio::local::stream_protocol; R tmp = callable(); { stream_protocol::endpoint ep(endpoint_name.native()); stream_protocol::iostream stream(ep); NUKLEI_SERIALIZATION_BINARY_OARCHIVE oa(stream); oa & i & NUKLEI_SERIALIZATION_NVP(tmp); } _exit(0); } else { //pids.at(i) = pid; } } using boost::asio::local::stream_protocol; stream_protocol::endpoint ep(endpoint_name.native()); boost::asio::io_service io_service; stream_protocol::acceptor acceptor(io_service, ep); for (int i = 0; i < n_; i++) { R tmp; int fork_i = 0; { stream_protocol::iostream stream; acceptor.accept(*stream.rdbuf()); NUKLEI_SERIALIZATION_BINARY_IARCHIVE ia(stream); ia & fork_i & NUKLEI_SERIALIZATION_NVP(tmp); } retv.push_back(tmp); NUKLEI_INFO("Finished fork " << fork_i << " with value " << pa(tmp) << "."); } // clean up: boost::filesystem::remove(endpoint_name); reap(); return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_pthread(Callable callable, PrintAccessor pa) const { std::vector<R> retv(n_); std::vector< boost::shared_ptr<boost::thread> > threads; for (int i = 0; i < n_; ++i) { // This does not work without the first boost::ref (even if bind<void> // is used. The solution is to parametrize run_pthread_stub with // boost::_bi::protected_bind_t<C>, and boost::protect(callable), // but _bi is not public. 
//boost::shared_ptr<boost::thread> thrd //(new boost::thread(boost::bind(run_pthread_stub<R, Callable>, // boost::ref(callable), // boost::ref(retv.at(i))))); // For future ref, here's the helper function: //template<typename R, typename Callable> //void run_pthread_stub(Callable callable, R& ret) //{ // ret = callable(); //} boost::shared_ptr<boost::thread> thread (new boost::thread (boost::bind<void>(pthread_wrapper<R, Callable>(callable), boost::ref(retv.at(i))))); threads.push_back(thread); } for (int i = 0; i < n_; ++i) { threads.at(i)->join(); NUKLEI_INFO("Finished thread " << i << " with value " << pa(retv.at(i)) << "."); } return retv; } template<typename R, typename Callable, typename PrintAccessor> std::vector<R> parallelizer::run_single(Callable callable, PrintAccessor pa) const { std::vector<R> retv; for (int i = 0; i < n_; ++i) { R tmp = callable(); retv.push_back(tmp); NUKLEI_INFO("Finished slice " << i << " with value " << pa(tmp) << "."); } return retv; } } #endif
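The OpenMP variant above lets iterations run concurrently and serializes only the merge into retv under a named critical section. A self-contained C reduction of that pattern (compute and results are illustrative stand-ins for callable() and retv), with the same #ifdef guards so it also builds without OpenMP:

#include <stdio.h>

static double compute(int i) { return i * 0.5; }  /* stand-in for callable() */

int main(void) {
    enum { N = 8 };
    double results[N];
    int count = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < N; ++i) {
        double tmp = compute(i);          /* independent per-iteration work */
#ifdef _OPENMP
#pragma omp critical(nuklei_parallelizer_merge)
#endif
        {
            results[count++] = tmp;       /* serialized merge, like push_back */
        }
    }
    for (int i = 0; i < count; ++i) {
        printf("%f\n", results[i]);       /* order is nondeterministic */
    }
    return 0;
}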
uct.c
/* Heuristic UCT-RAVE implementation. With RAVE and criticality. Playout is limited with a dynamic offset depending on stone count. Cutoff playouts are rated. Playouts are cut short with a mercy threshold (like pachi, orego and others). Initializes expanded states with prior values. Last-good-reply with forgetting (LGRF1) is also used. A virtual loss is also added on play traversal, which is later corrected if needed. MCTS can be resumed on demand by a few extra simulations at a time. It can also record the average final score, for the purpose of score estimation. */ #include "config.h" #include <stdio.h> #include <string.h> #include <math.h> /* for round, sqrt */ #include <stdlib.h> #include <assert.h> #include <omp.h> #include "alloc.h" #include "amaf_rave.h" #include "board.h" #include "cfg_board.h" #include "constants.h" #include "flog.h" #include "mcts.h" #include "move.h" #include "pat3.h" #include "playout.h" #include "priors.h" #include "pts_file.h" #include "randg.h" #include "scoring.h" #include "state_changes.h" #include "stringm.h" #include "timem.h" #include "transpositions.h" #include "types.h" #include "zobrist.h" /* from board_constants */ extern u8 distances_to_border[TOTAL_BOARD_SIZ]; extern move_seq nei_dst_3[TOTAL_BOARD_SIZ]; static bool ran_out_of_memory; static bool search_stop; static u16 max_depths[MAXIMUM_NUM_THREADS]; /* Whether an MCTS can be started in the background. Is disabled if memory runs out, and needs to be reset before testing again whether it can run. */ static bool mcts_can_resume = true; static bool uct_inited = false; /* Initiate MCTS dependencies. */ void mcts_init() { if (uct_inited) { return; } rand_init(); board_constants_init(); zobrist_init(); pat3_init(); tt_init(); load_starting_points(); uct_inited = true; } static void select_play( tt_stats * stats, tt_play ** play ) { if (*play != NULL && (*play)->lgrf1_reply != NULL) { *play = (*play)->lgrf1_reply; return; } tt_play * best_plays[TOTAL_BOARD_SIZ]; double best_q = -1.0; u16 equal_quality_plays = 0; for (move k = 0; k < stats->plays_count; ++k) { #if USE_AMAF_RAVE double play_q = uct1_rave(&stats->plays[k]); #else double play_q = stats->plays[k].mc_q; #endif double uct_q = play_q; if (uct_q > best_q) { best_plays[0] = &stats->plays[k]; equal_quality_plays = 1; best_q = uct_q; } else if (uct_q == best_q) { best_plays[equal_quality_plays] = &stats->plays[k]; ++equal_quality_plays; } } if (equal_quality_plays == 1) { *play = best_plays[0]; return; } if (equal_quality_plays > 1) { u16 p = rand_u16(equal_quality_plays); *play = best_plays[p]; return; } flog_crit("mcts", "play selection exception"); } static d16 mcts_expansion( cfg_board * cb, bool is_black, tt_stats * stats, u8 traversed[static TOTAL_BOARD_SIZ] ) { stats->expansion_delay--; if (stats->expansion_delay == -1) { init_new_state(stats, cb, is_black); } omp_unset_lock(&stats->lock); d16 outcome = playout_heavy_amaf(cb, is_black, traversed); return outcome; } static d16 mcts_selection( cfg_board * cb, u64 zobrist_hash, bool is_black ) { d16 depth = 6; tt_stats * stats[MAX_UCT_DEPTH + 6]; tt_play * plays[MAX_UCT_DEPTH + 7]; /* for testing superko */ stats[0] = stats[1] = stats[2] = stats[3] = stats[4] = stats[5] = NULL; u8 traversed[TOTAL_BOARD_SIZ]; memset(traversed, EMPTY, TOTAL_BOARD_SIZ); d16 outcome; tt_stats * curr_stats = NULL; tt_play * play = NULL; while (1) { if (depth >= MAX_UCT_DEPTH + 6) { outcome = score_stones_and_area(cb->p); break; } if (curr_stats == NULL) { curr_stats = tt_lookup_null(cb, is_black, zobrist_hash); if (curr_stats == 
NULL) { if (!ran_out_of_memory) { ran_out_of_memory = true; search_stop = true; } outcome = playout_heavy_amaf(cb, is_black, traversed); break; } else if (play != NULL) { play->next_stats = curr_stats; } } else { omp_set_lock(&curr_stats->lock); } /* Positional superko detection */ if (is_board_move(cb->last_played) && (stats[depth - 2] == curr_stats || stats[depth - 3] == curr_stats || stats[depth - 4] == curr_stats || stats[depth - 5] == curr_stats || stats[depth - 6] == curr_stats)) { omp_unset_lock(&curr_stats->lock); /* loss for player that committed superko */ outcome = is_black ? 1 : -1; break; } if (curr_stats->expansion_delay >= 0) { /* already unsets lock */ outcome = mcts_expansion(cb, is_black, curr_stats, traversed); break; } select_play(curr_stats, &play); play->mc_n++; play->mc_q -= play->mc_q / play->mc_n; omp_unset_lock(&curr_stats->lock); if (play->m == PASS) { if (cb->last_played == PASS) { outcome = score_stones_and_area(cb->p); break; } just_pass(cb); } else { just_play2(cb, is_black, play->m, &zobrist_hash); } plays[depth] = play; stats[depth] = curr_stats; ++depth; curr_stats = play->next_stats; is_black = !is_black; } if (outcome == 0) { for (d16 k = depth - 1; k >= 6; --k) { is_black = !is_black; move m = plays[k]->m; omp_set_lock(&stats[k]->lock); /* LGRF */ plays[k]->lgrf1_reply = NULL; /* AMAF/RAVE */ if (m != PASS) { traversed[m] = is_black ? BLACK_STONE : WHITE_STONE; } update_amaf_stats2(stats[k], traversed, is_black); omp_unset_lock(&stats[k]->lock); } } else { plays[depth] = NULL; for (d16 k = depth - 1; k >= 6; --k) { is_black = !is_black; move m = plays[k]->m; double z = (is_black == (outcome > 0)) ? 1.0 : 0.0; omp_set_lock(&stats[k]->lock); /* MC sampling */ if (is_black == (outcome > 0)) { plays[k]->mc_q += 1.0 / plays[k]->mc_n; } /* AMAF/RAVE */ if (m != PASS) { traversed[m] = is_black ? BLACK_STONE : WHITE_STONE; } update_amaf_stats(stats[k], traversed, is_black, z); /* LGRF */ if (is_black == (outcome > 0)) { plays[k]->lgrf1_reply = NULL; } else { plays[k]->lgrf1_reply = plays[k + 1]; } /* Criticality */ if (m != PASS && cb->p[m] != EMPTY) { double winner_owns_coord = ((outcome > 0) == (cb->p[m] == BLACK_STONE)) ? 1.0 : 0.0; plays[k]->owner_winning += (winner_owns_coord - plays[k]->owner_winning) / plays[k]->mc_n; double player_owns_coord = (is_black == (cb->p[m] == BLACK_STONE)) ? 1.0 : 0.0; plays[k]->color_owning += (player_owns_coord - plays[k]->color_owning) / plays[k]->mc_n; } omp_unset_lock(&stats[k]->lock); } } if (depth > max_depths[omp_get_thread_num()]) { max_depths[omp_get_thread_num()] = depth; } return outcome; } /* Performs a MCTS in at least the available time. The search may end early if the estimated win rate is very one sided, in which case the play selected is a pass. The search is also interrupted if memory runs out. 
RETURNS true if a play or pass is suggested instead of resigning */ bool mcts_start_timed( out_board * out_b, const board * b, bool is_black, u64 stop_time, u64 early_stop_time ) { mcts_init(); u64 start_zobrist_hash = zobrist_new_hash(b); tt_stats * stats = tt_lookup_create(b, is_black, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, is_black); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); u32 draws = 0; u32 wins = 0; u32 losses = 0; ran_out_of_memory = false; search_stop = false; bool stopped_early_by_wr = false; #pragma omp parallel for for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); d16 outcome = mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (outcome == 0) { #pragma omp atomic draws++; } else if ((outcome > 0) == is_black) { #pragma omp atomic wins++; } else { #pragma omp atomic losses++; } if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); #if UCT_CAN_STOP_EARLY if (curr_time >= early_stop_time) { if (curr_time >= stop_time) { search_stop = true; } else { double wr = ((double)wins) / ((double)(wins + losses)); if (wr >= UCT_EARLY_WINRATE) { stopped_early_by_wr = true; search_stop = true; } } } #else if (curr_time >= stop_time) { search_stop = true; } #endif } } if (ran_out_of_memory) { flog_warn("uct", "search ran out of memory"); } char * s = alloc(); if (stopped_early_by_wr) { d64 diff = stop_time - current_time_in_millis(); char * s2 = alloc(); format_nr_millis(s2, diff); snprintf(s, MAX_PAGE_SIZ, "search ended %s early", s2); release(s2); flog_info("uct", s); } clear_out_board(out_b); out_b->pass = UCT_RESIGN_WINRATE; for (move k = 0; k < stats->plays_count; ++k) { if (stats->plays[k].m == PASS) { out_b->pass = stats->plays[k].mc_q; } else { out_b->tested[stats->plays[k].m] = true; #if USE_AMAF_RAVE out_b->value[stats->plays[k].m] = uct1_rave(&stats->plays[k]); #else out_b->value[stats->plays[k].m] = stats->plays[k].mc_q; #endif } } u16 max_depth = max_depths[0]; for (u16 k = 1; k < MAXIMUM_NUM_THREADS; ++k) { if (max_depths[k] > max_depth) { max_depth = max_depths[k]; } } max_depth -= 6; u32 simulations = wins + losses; double wr = ((double)wins) / ((double)simulations); if (draws > 0) { simulations += draws; snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f, draws=%u)\n", simulations, max_depth, wr, draws); } else { snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f)\n", simulations, max_depth, wr); } flog_info("uct", s); release(s); cfg_board_free(&initial_cfg_board); /* prevent resignation unless we have played very few simulations */ if (simulations >= UCT_RESIGN_PLAYOUTS && wr < UCT_RESIGN_WINRATE) { return false; } return true; } /* Performs a MCTS for the selected number of simulations. The search is interrupted if memory runs out. 
RETURNS true if a play or pass is suggested instead of resigning */ bool mcts_start_sims( out_board * out_b, const board * b, bool is_black, u32 simulations ) { mcts_init(); u64 start_zobrist_hash = zobrist_new_hash(b); tt_stats * stats = tt_lookup_create(b, is_black, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, is_black); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); u32 draws = 0; u32 wins = 0; u32 losses = 0; ran_out_of_memory = false; search_stop = false; #pragma omp parallel for for (u32 sim = 0; sim < simulations; ++sim) { cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); d16 outcome = mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (outcome == 0) { #pragma omp atomic draws++; } else if ((outcome > 0) == is_black) { #pragma omp atomic wins++; } else { #pragma omp atomic losses++; } } if (ran_out_of_memory) { flog_warn("uct", "search ran out of memory"); } char * s = alloc(); clear_out_board(out_b); out_b->pass = UCT_RESIGN_WINRATE; for (move k = 0; k < stats->plays_count; ++k) { if (stats->plays[k].m == PASS) { out_b->pass = stats->plays[k].mc_q; } else { out_b->tested[stats->plays[k].m] = true; #if USE_AMAF_RAVE out_b->value[stats->plays[k].m] = uct1_rave(&stats->plays[k]); #else out_b->value[stats->plays[k].m] = stats->plays[k].mc_q; #endif } } u16 max_depth = max_depths[0]; for (u16 k = 1; k < MAXIMUM_NUM_THREADS; ++k) { if (max_depths[k] > max_depth) { max_depth = max_depths[k]; } } double wr; if (draws > 0) { wr = ((double)wins) / ((double)(wins + losses)); snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f, draws=%u)\n", simulations, max_depth, wr, draws); } else { wr = ((double)wins) / ((double)simulations); snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f)\n", simulations, max_depth, wr); } flog_info("uct", s); release(s); cfg_board_free(&initial_cfg_board); if (wr < UCT_RESIGN_WINRATE) { return false; } return true; } /* Reset whether MCTS can run in the background after a previous attempt may have run out of memory. */ void reset_mcts_can_resume() { mcts_can_resume = true; } /* Continue a previous MCTS. */ void mcts_resume( const board * b, bool is_black ) { if (!mcts_can_resume) { return; } mcts_init(); u64 stop_time = current_time_in_millis() + 50; ran_out_of_memory = false; search_stop = false; u64 start_zobrist_hash = zobrist_new_hash(b); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); #pragma omp parallel for for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); if (curr_time >= stop_time) { search_stop = true; } } } if (ran_out_of_memory) { mcts_can_resume = false; } cfg_board_free(&initial_cfg_board); } /* Execute a 1 second MCTS and return the number of simulations ran. 
RETURNS simulations number */ u32 mcts_benchmark( u32 time_available /* in milliseconds */ ) { mcts_init(); board b; clear_board(&b); u64 curr_time = current_time_in_millis(); u64 stop_time = curr_time + time_available; u64 start_zobrist_hash = zobrist_new_hash(&b); tt_stats * stats = tt_lookup_create(&b, true, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, &b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, true); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); bool search_stop = false; u32 simulations = 0; /* TODO: do a longer initial run to initialize state */ #pragma omp parallel for for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; /* TODO: change to break */ } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); mcts_selection(&cb, start_zobrist_hash, true); cfg_board_free(&cb); #pragma omp atomic simulations++; if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); if (curr_time >= stop_time) { search_stop = true; } } } cfg_board_free(&initial_cfg_board); return simulations; }
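A worked sketch of the virtual-loss bookkeeping in mcts_selection above, standalone with plain doubles instead of tt_play fields. On traversal the play is charged a provisional loss (mc_n++ then mc_q -= mc_q / mc_n appends a 0 to the running win-rate average); if the playout wins, backpropagation adds 1.0 / mc_n, which converts that appended 0 into a 1:

#include <stdio.h>

int main(void) {
    double mc_q = 0.5;       /* running win rate */
    int mc_n = 10;           /* samples so far */

    /* traversal: play->mc_n++; play->mc_q -= play->mc_q / play->mc_n; */
    mc_n++;
    mc_q -= mc_q / mc_n;     /* (0.5 * 10 + 0.0) / 11 = 0.4545... */
    printf("after virtual loss: %f\n", mc_q);

    /* backpropagation of a win: plays[k]->mc_q += 1.0 / plays[k]->mc_n; */
    mc_q += 1.0 / mc_n;      /* (0.5 * 10 + 1.0) / 11 = 0.5454... */
    printf("after win correction: %f\n", mc_q);
    return 0;
}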
/* * Heuristic UCT-RAVE implementation. * * With RAVE and criticality. Playout is limited with a dynamic offset depending * on stone count. Cutoff playouts are rated. Playouts are cut short with a * mercy threshold (like pachi, orego and others). Initializes expanded states * with prior values. Last-good-reply with forgetting (LGRF1) is also used. A * virtual loss is also added on play traversal, which is later corrected if * needed. * * MCTS can be resumed on demand by a few extra simulations at a time. It can * also record the average final score, for the purpose of score estimation. */ #include "config.h" #include <stdio.h> #include <string.h> #include <math.h> /* for round, sqrt */ #include <stdlib.h> #include <assert.h> #include <omp.h> #include "alloc.h" #include "amaf_rave.h" #include "board.h" #include "cfg_board.h" #include "constants.h" #include "flog.h" #include "mcts.h" #include "move.h" #include "pat3.h" #include "playout.h" #include "priors.h" #include "pts_file.h" #include "randg.h" #include "scoring.h" #include "state_changes.h" #include "stringm.h" #include "timem.h" #include "transpositions.h" #include "types.h" #include "zobrist.h" /* from board_constants */ extern u8 distances_to_border[TOTAL_BOARD_SIZ]; extern move_seq nei_dst_3[TOTAL_BOARD_SIZ]; static bool ran_out_of_memory; static bool search_stop; static u16 max_depths[MAXIMUM_NUM_THREADS]; /* * Whether an MCTS can be started in the background. Is disabled if memory runs * out, and needs to be reset before testing again whether it can run. */ static bool mcts_can_resume = true; static bool uct_inited = false; /* * Initiate MCTS dependencies. */ void mcts_init() { if (uct_inited) { return; } rand_init(); board_constants_init(); zobrist_init(); pat3_init(); tt_init(); load_starting_points(); uct_inited = true; } static void select_play( tt_stats * stats, tt_play ** play ) { if (*play != NULL && (*play)->lgrf1_reply != NULL) { *play = (*play)->lgrf1_reply; return; } tt_play *best_plays[TOTAL_BOARD_SIZ]; double best_q = -1.0; u16 equal_quality_plays = 0; for (move k = 0; k < stats->plays_count; ++k) { #if USE_AMAF_RAVE double play_q = uct1_rave(&stats->plays[k]); #else double play_q = stats->plays[k].mc_q; #endif double uct_q = play_q; if (uct_q > best_q) { best_plays[0] = &stats->plays[k]; equal_quality_plays = 1; best_q = uct_q; } else if (uct_q == best_q) { best_plays[equal_quality_plays] = &stats->plays[k]; ++equal_quality_plays; } } if (equal_quality_plays == 1) { *play = best_plays[0]; return; } if (equal_quality_plays > 1) { u16 p = rand_u16(equal_quality_plays); *play = best_plays[p]; return; } flog_crit("mcts", "play selection exception"); } static d16 mcts_expansion( cfg_board * cb, bool is_black, tt_stats * stats, u8 traversed[static TOTAL_BOARD_SIZ] ) { stats->expansion_delay--; if (stats->expansion_delay == -1) { init_new_state(stats, cb, is_black); } omp_unset_lock(&stats->lock); d16 outcome = playout_heavy_amaf(cb, is_black, traversed); return outcome; } static d16 mcts_selection( cfg_board * cb, u64 zobrist_hash, bool is_black ) { d16 depth = 6; tt_stats *stats[MAX_UCT_DEPTH + 6]; tt_play *plays[MAX_UCT_DEPTH + 7]; /* for testing superko */ stats[0] = stats[1] = stats[2] = stats[3] = stats[4] = stats[5] = NULL; u8 traversed[TOTAL_BOARD_SIZ]; memset(traversed, EMPTY, TOTAL_BOARD_SIZ); d16 outcome; tt_stats *curr_stats = NULL; tt_play *play = NULL; while (1) { if (depth >= MAX_UCT_DEPTH + 6) { outcome = score_stones_and_area(cb->p); break; } if (curr_stats == NULL) { curr_stats = tt_lookup_null(cb, is_black, 
zobrist_hash); if (curr_stats == NULL) { if (!ran_out_of_memory) { ran_out_of_memory = true; search_stop = true; } outcome = playout_heavy_amaf(cb, is_black, traversed); break; } else if (play != NULL) { play->next_stats = curr_stats; } } else { omp_set_lock(&curr_stats->lock); } /* Positional superko detection */ if (is_board_move(cb->last_played) && (stats[depth - 2] == curr_stats || stats[depth - 3] == curr_stats || stats[depth - 4] == curr_stats || stats[depth - 5] == curr_stats || stats[depth - 6] == curr_stats)) { omp_unset_lock(&curr_stats->lock); /* loss for player that committed superko */ outcome = is_black ? 1 : -1; break; } if (curr_stats->expansion_delay >= 0) { /* already unsets lock */ outcome = mcts_expansion(cb, is_black, curr_stats, traversed); break; } select_play(curr_stats, &play); play->mc_n++; play->mc_q -= play->mc_q / play->mc_n; omp_unset_lock(&curr_stats->lock); if (play->m == PASS) { if (cb->last_played == PASS) { outcome = score_stones_and_area(cb->p); break; } just_pass(cb); } else { just_play2(cb, is_black, play->m, &zobrist_hash); } plays[depth] = play; stats[depth] = curr_stats; ++depth; curr_stats = play->next_stats; is_black = !is_black; } if (outcome == 0) { for (d16 k = depth - 1; k >= 6; --k) { is_black = !is_black; move m = plays[k]->m; omp_set_lock(&stats[k]->lock); /* LGRF */ plays[k]->lgrf1_reply = NULL; /* AMAF/RAVE */ if (m != PASS) { traversed[m] = is_black ? BLACK_STONE : WHITE_STONE; } update_amaf_stats2(stats[k], traversed, is_black); omp_unset_lock(&stats[k]->lock); } } else { plays[depth] = NULL; for (d16 k = depth - 1; k >= 6; --k) { is_black = !is_black; move m = plays[k]->m; double z = (is_black == (outcome > 0)) ? 1.0 : 0.0; omp_set_lock(&stats[k]->lock); /* MC sampling */ if (is_black == (outcome > 0)) { plays[k]->mc_q += 1.0 / plays[k]->mc_n; } /* AMAF/RAVE */ if (m != PASS) { traversed[m] = is_black ? BLACK_STONE : WHITE_STONE; } update_amaf_stats(stats[k], traversed, is_black, z); /* LGRF */ if (is_black == (outcome > 0)) { plays[k]->lgrf1_reply = NULL; } else { plays[k]->lgrf1_reply = plays[k + 1]; } /* Criticality */ if (m != PASS && cb->p[m] != EMPTY) { double winner_owns_coord = ((outcome > 0) == (cb->p[m] == BLACK_STONE)) ? 1.0 : 0.0; plays[k]->owner_winning += (winner_owns_coord - plays[k]->owner_winning) / plays[k]->mc_n; double player_owns_coord = (is_black == (cb->p[m] == BLACK_STONE)) ? 1.0 : 0.0; plays[k]->color_owning += (player_owns_coord - plays[k]->color_owning) / plays[k]->mc_n; } omp_unset_lock(&stats[k]->lock); } } if (depth > max_depths[omp_get_thread_num()]) { max_depths[omp_get_thread_num()] = depth; } return outcome; } /* * Performs a MCTS in at least the available time. * * The search may end early if the estimated win rate is very one sided, in * which case the play selected is a pass. The search is also interrupted if * memory runs out. 
RETURNS true if a play or pass is suggested instead of * resigning */ bool mcts_start_timed( out_board * out_b, const board * b, bool is_black, u64 stop_time, u64 early_stop_time ) { mcts_init(); u64 start_zobrist_hash = zobrist_new_hash(b); tt_stats *stats = tt_lookup_create(b, is_black, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, is_black); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); u32 draws = 0; u32 wins = 0; u32 losses = 0; ran_out_of_memory = false; search_stop = false; bool stopped_early_by_wr = false; for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); d16 outcome = mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (outcome == 0) { draws++; } else if ((outcome > 0) == is_black) { wins++; } else { losses++; } if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); #if UCT_CAN_STOP_EARLY if (curr_time >= early_stop_time) { if (curr_time >= stop_time) { search_stop = true; } else { double wr = ((double)wins) / ((double)(wins + losses)); if (wr >= UCT_EARLY_WINRATE) { stopped_early_by_wr = true; search_stop = true; } } } #else if (curr_time >= stop_time) { search_stop = true; } #endif } } if (ran_out_of_memory) { flog_warn("uct", "search ran out of memory"); } char *s = alloc(); if (stopped_early_by_wr) { d64 diff = stop_time - current_time_in_millis(); char *s2 = alloc(); format_nr_millis(s2, diff); snprintf(s, MAX_PAGE_SIZ, "search ended %s early", s2); release(s2); flog_info("uct", s); } clear_out_board(out_b); out_b->pass = UCT_RESIGN_WINRATE; for (move k = 0; k < stats->plays_count; ++k) { if (stats->plays[k].m == PASS) { out_b->pass = stats->plays[k].mc_q; } else { out_b->tested[stats->plays[k].m] = true; #if USE_AMAF_RAVE out_b->value[stats->plays[k].m] = uct1_rave(&stats->plays[k]); #else out_b->value[stats->plays[k].m] = stats->plays[k].mc_q; #endif } } u16 max_depth = max_depths[0]; for (u16 k = 1; k < MAXIMUM_NUM_THREADS; ++k) { if (max_depths[k] > max_depth) { max_depth = max_depths[k]; } } max_depth -= 6; u32 simulations = wins + losses; double wr = ((double)wins) / ((double)simulations); if (draws > 0) { simulations += draws; snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f, draws=%u)\n", simulations, max_depth, wr, draws); } else { snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f)\n", simulations, max_depth, wr); } flog_info("uct", s); release(s); cfg_board_free(&initial_cfg_board); /* prevent resignation unless we have played very few simulations */ if (simulations >= UCT_RESIGN_PLAYOUTS && wr < UCT_RESIGN_WINRATE) { return false; } return true; } /* * Performs a MCTS for the selected number of simulations. * * The search is interrupted if memory runs out. 
RETURNS true if a play or pass * is suggested instead of resigning */ bool mcts_start_sims( out_board * out_b, const board * b, bool is_black, u32 simulations ) { mcts_init(); u64 start_zobrist_hash = zobrist_new_hash(b); tt_stats *stats = tt_lookup_create(b, is_black, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, is_black); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); u32 draws = 0; u32 wins = 0; u32 losses = 0; ran_out_of_memory = false; search_stop = false; for (u32 sim = 0; sim < simulations; ++sim) { cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); d16 outcome = mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (outcome == 0) { draws++; } else if ((outcome > 0) == is_black) { wins++; } else { losses++; } } if (ran_out_of_memory) { flog_warn("uct", "search ran out of memory"); } char *s = alloc(); clear_out_board(out_b); out_b->pass = UCT_RESIGN_WINRATE; for (move k = 0; k < stats->plays_count; ++k) { if (stats->plays[k].m == PASS) { out_b->pass = stats->plays[k].mc_q; } else { out_b->tested[stats->plays[k].m] = true; #if USE_AMAF_RAVE out_b->value[stats->plays[k].m] = uct1_rave(&stats->plays[k]); #else out_b->value[stats->plays[k].m] = stats->plays[k].mc_q; #endif } } u16 max_depth = max_depths[0]; for (u16 k = 1; k < MAXIMUM_NUM_THREADS; ++k) { if (max_depths[k] > max_depth) { max_depth = max_depths[k]; } } double wr; if (draws > 0) { wr = ((double)wins) / ((double)(wins + losses)); snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f, draws=%u)\n", simulations, max_depth, wr, draws); } else { wr = ((double)wins) / ((double)simulations); snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f)\n", simulations, max_depth, wr); } flog_info("uct", s); release(s); cfg_board_free(&initial_cfg_board); if (wr < UCT_RESIGN_WINRATE) { return false; } return true; } /* * Reset whether MCTS can run in the background after a previous attempt may * have run out of memory. */ void reset_mcts_can_resume() { mcts_can_resume = true; } /* * Continue a previous MCTS. */ void mcts_resume( const board * b, bool is_black ) { if (!mcts_can_resume) { return; } mcts_init(); u64 stop_time = current_time_in_millis() + 50; ran_out_of_memory = false; search_stop = false; u64 start_zobrist_hash = zobrist_new_hash(b); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); if (curr_time >= stop_time) { search_stop = true; } } } if (ran_out_of_memory) { mcts_can_resume = false; } cfg_board_free(&initial_cfg_board); } /* * Execute a 1 second MCTS and return the number of simulations ran. 
RETURNS * simulations number */ u32 mcts_benchmark( u32 time_available /* in milliseconds */ ) { mcts_init(); board b; clear_board(&b); u64 curr_time = current_time_in_millis(); u64 stop_time = curr_time + time_available; u64 start_zobrist_hash = zobrist_new_hash(&b); tt_stats *stats = tt_lookup_create(&b, true, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, &b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, true); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); bool search_stop = false; u32 simulations = 0; /* TODO: do a longer initial run to initialize state */ for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; /* TODO: change to break */ } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); mcts_selection(&cb, start_zobrist_hash, true); cfg_board_free(&cb); simulations++; if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); if (curr_time >= stop_time) { search_stop = true; } } } cfg_board_free(&initial_cfg_board); return simulations; }
/* * Heuristic UCT-RAVE implementation. * * With RAVE and criticality. Playout is limited with a dynamic offset depending * on stone count. Cutoff playouts are rated. Playouts are cut short with a * mercy threshold (like pachi, orego and others). Initializes expanded states * with prior values. Last-good-reply with forgetting (LGRF1) is also used. A * virtual loss is also added on play traversal, which is later corrected if * needed. * * MCTS can be resumed on demand by a few extra simulations at a time. It can * also record the average final score, for the purpose of score estimation. */ #include "config.h" #include <stdio.h> #include <string.h> #include <math.h> /* for round, sqrt */ #include <stdlib.h> #include <assert.h> #include <omp.h> #include "alloc.h" #include "amaf_rave.h" #include "board.h" #include "cfg_board.h" #include "constants.h" #include "flog.h" #include "mcts.h" #include "move.h" #include "pat3.h" #include "playout.h" #include "priors.h" #include "pts_file.h" #include "randg.h" #include "scoring.h" #include "state_changes.h" #include "stringm.h" #include "timem.h" #include "transpositions.h" #include "types.h" #include "zobrist.h" /* from board_constants */ extern u8 distances_to_border[TOTAL_BOARD_SIZ]; extern move_seq nei_dst_3[TOTAL_BOARD_SIZ]; static bool ran_out_of_memory; static bool search_stop; static u16 max_depths[MAXIMUM_NUM_THREADS]; /* * Whether an MCTS can be started in the background. Is disabled if memory runs * out, and needs to be reset before testing again whether it can run. */ static bool mcts_can_resume = true; static bool uct_inited = false; /* * Initiate MCTS dependencies. */ void mcts_init() { if (uct_inited) { return; } rand_init(); board_constants_init(); zobrist_init(); pat3_init(); tt_init(); load_starting_points(); uct_inited = true; } static void select_play( tt_stats * stats, tt_play ** play ) { if (*play != NULL && (*play)->lgrf1_reply != NULL) { *play = (*play)->lgrf1_reply; return; } tt_play *best_plays[TOTAL_BOARD_SIZ]; double best_q = -1.0; u16 equal_quality_plays = 0; for (move k = 0; k < stats->plays_count; ++k) { #if USE_AMAF_RAVE double play_q = uct1_rave(&stats->plays[k]); #else double play_q = stats->plays[k].mc_q; #endif double uct_q = play_q; if (uct_q > best_q) { best_plays[0] = &stats->plays[k]; equal_quality_plays = 1; best_q = uct_q; } else if (uct_q == best_q) { best_plays[equal_quality_plays] = &stats->plays[k]; ++equal_quality_plays; } } if (equal_quality_plays == 1) { *play = best_plays[0]; return; } if (equal_quality_plays > 1) { u16 p = rand_u16(equal_quality_plays); *play = best_plays[p]; return; } flog_crit("mcts", "play selection exception"); } static d16 mcts_expansion( cfg_board * cb, bool is_black, tt_stats * stats, u8 traversed[static TOTAL_BOARD_SIZ] ) { stats->expansion_delay--; if (stats->expansion_delay == -1) { init_new_state(stats, cb, is_black); } omp_unset_lock(&stats->lock); d16 outcome = playout_heavy_amaf(cb, is_black, traversed); return outcome; } static d16 mcts_selection( cfg_board * cb, u64 zobrist_hash, bool is_black ) { d16 depth = 6; tt_stats *stats[MAX_UCT_DEPTH + 6]; tt_play *plays[MAX_UCT_DEPTH + 7]; /* for testing superko */ stats[0] = stats[1] = stats[2] = stats[3] = stats[4] = stats[5] = NULL; u8 traversed[TOTAL_BOARD_SIZ]; memset(traversed, EMPTY, TOTAL_BOARD_SIZ); d16 outcome; tt_stats *curr_stats = NULL; tt_play *play = NULL; while (1) { if (depth >= MAX_UCT_DEPTH + 6) { outcome = score_stones_and_area(cb->p); break; } if (curr_stats == NULL) { curr_stats = tt_lookup_null(cb, is_black, 
zobrist_hash); if (curr_stats == NULL) { if (!ran_out_of_memory) { ran_out_of_memory = true; search_stop = true; } outcome = playout_heavy_amaf(cb, is_black, traversed); break; } else if (play != NULL) { play->next_stats = curr_stats; } } else { omp_set_lock(&curr_stats->lock); } /* Positional superko detection */ if (is_board_move(cb->last_played) && (stats[depth - 2] == curr_stats || stats[depth - 3] == curr_stats || stats[depth - 4] == curr_stats || stats[depth - 5] == curr_stats || stats[depth - 6] == curr_stats)) { omp_unset_lock(&curr_stats->lock); /* loss for player that committed superko */ outcome = is_black ? 1 : -1; break; } if (curr_stats->expansion_delay >= 0) { /* already unsets lock */ outcome = mcts_expansion(cb, is_black, curr_stats, traversed); break; } select_play(curr_stats, &play); play->mc_n++; play->mc_q -= play->mc_q / play->mc_n; omp_unset_lock(&curr_stats->lock); if (play->m == PASS) { if (cb->last_played == PASS) { outcome = score_stones_and_area(cb->p); break; } just_pass(cb); } else { just_play2(cb, is_black, play->m, &zobrist_hash); } plays[depth] = play; stats[depth] = curr_stats; ++depth; curr_stats = play->next_stats; is_black = !is_black; } if (outcome == 0) { for (d16 k = depth - 1; k >= 6; --k) { is_black = !is_black; move m = plays[k]->m; omp_set_lock(&stats[k]->lock); /* LGRF */ plays[k]->lgrf1_reply = NULL; /* AMAF/RAVE */ if (m != PASS) { traversed[m] = is_black ? BLACK_STONE : WHITE_STONE; } update_amaf_stats2(stats[k], traversed, is_black); omp_unset_lock(&stats[k]->lock); } } else { plays[depth] = NULL; for (d16 k = depth - 1; k >= 6; --k) { is_black = !is_black; move m = plays[k]->m; double z = (is_black == (outcome > 0)) ? 1.0 : 0.0; omp_set_lock(&stats[k]->lock); /* MC sampling */ if (is_black == (outcome > 0)) { plays[k]->mc_q += 1.0 / plays[k]->mc_n; } /* AMAF/RAVE */ if (m != PASS) { traversed[m] = is_black ? BLACK_STONE : WHITE_STONE; } update_amaf_stats(stats[k], traversed, is_black, z); /* LGRF */ if (is_black == (outcome > 0)) { plays[k]->lgrf1_reply = NULL; } else { plays[k]->lgrf1_reply = plays[k + 1]; } /* Criticality */ if (m != PASS && cb->p[m] != EMPTY) { double winner_owns_coord = ((outcome > 0) == (cb->p[m] == BLACK_STONE)) ? 1.0 : 0.0; plays[k]->owner_winning += (winner_owns_coord - plays[k]->owner_winning) / plays[k]->mc_n; double player_owns_coord = (is_black == (cb->p[m] == BLACK_STONE)) ? 1.0 : 0.0; plays[k]->color_owning += (player_owns_coord - plays[k]->color_owning) / plays[k]->mc_n; } omp_unset_lock(&stats[k]->lock); } } if (depth > max_depths[omp_get_thread_num()]) { max_depths[omp_get_thread_num()] = depth; } return outcome; } /* * Performs a MCTS in at least the available time. * * The search may end early if the estimated win rate is very one sided, in * which case the play selected is a pass. The search is also interrupted if * memory runs out. 
RETURNS true if a play or pass is suggested instead of * resigning */ bool mcts_start_timed( out_board * out_b, const board * b, bool is_black, u64 stop_time, u64 early_stop_time ) { mcts_init(); u64 start_zobrist_hash = zobrist_new_hash(b); tt_stats *stats = tt_lookup_create(b, is_black, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, is_black); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); u32 draws = 0; u32 wins = 0; u32 losses = 0; ran_out_of_memory = false; search_stop = false; bool stopped_early_by_wr = false; #pragma omp parallel for for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); d16 outcome = mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (outcome == 0) { #pragma omp atomic draws++; } else if ((outcome > 0) == is_black) { #pragma omp atomic wins++; } else { #pragma omp atomic losses++; } if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); #if UCT_CAN_STOP_EARLY if (curr_time >= early_stop_time) { if (curr_time >= stop_time) { search_stop = true; } else { double wr = ((double)wins) / ((double)(wins + losses)); if (wr >= UCT_EARLY_WINRATE) { stopped_early_by_wr = true; search_stop = true; } } } #else if (curr_time >= stop_time) { search_stop = true; } #endif } } if (ran_out_of_memory) { flog_warn("uct", "search ran out of memory"); } char *s = alloc(); if (stopped_early_by_wr) { d64 diff = stop_time - current_time_in_millis(); char *s2 = alloc(); format_nr_millis(s2, diff); snprintf(s, MAX_PAGE_SIZ, "search ended %s early", s2); release(s2); flog_info("uct", s); } clear_out_board(out_b); out_b->pass = UCT_RESIGN_WINRATE; for (move k = 0; k < stats->plays_count; ++k) { if (stats->plays[k].m == PASS) { out_b->pass = stats->plays[k].mc_q; } else { out_b->tested[stats->plays[k].m] = true; #if USE_AMAF_RAVE out_b->value[stats->plays[k].m] = uct1_rave(&stats->plays[k]); #else out_b->value[stats->plays[k].m] = stats->plays[k].mc_q; #endif } } u16 max_depth = max_depths[0]; for (u16 k = 1; k < MAXIMUM_NUM_THREADS; ++k) { if (max_depths[k] > max_depth) { max_depth = max_depths[k]; } } max_depth -= 6; u32 simulations = wins + losses; double wr = ((double)wins) / ((double)simulations); if (draws > 0) { simulations += draws; snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f, draws=%u)\n", simulations, max_depth, wr, draws); } else { snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f)\n", simulations, max_depth, wr); } flog_info("uct", s); release(s); cfg_board_free(&initial_cfg_board); /* prevent resignation unless we have played very few simulations */ if (simulations >= UCT_RESIGN_PLAYOUTS && wr < UCT_RESIGN_WINRATE) { return false; } return true; } /* * Performs a MCTS for the selected number of simulations. * * The search is interrupted if memory runs out. 
RETURNS true if a play or pass * is suggested instead of resigning */ bool mcts_start_sims( out_board * out_b, const board * b, bool is_black, u32 simulations ) { mcts_init(); u64 start_zobrist_hash = zobrist_new_hash(b); tt_stats *stats = tt_lookup_create(b, is_black, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, is_black); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); u32 draws = 0; u32 wins = 0; u32 losses = 0; ran_out_of_memory = false; search_stop = false; #pragma omp parallel for for (u32 sim = 0; sim < simulations; ++sim) { cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); d16 outcome = mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (outcome == 0) { #pragma omp atomic draws++; } else if ((outcome > 0) == is_black) { #pragma omp atomic wins++; } else { #pragma omp atomic losses++; } } if (ran_out_of_memory) { flog_warn("uct", "search ran out of memory"); } char *s = alloc(); clear_out_board(out_b); out_b->pass = UCT_RESIGN_WINRATE; for (move k = 0; k < stats->plays_count; ++k) { if (stats->plays[k].m == PASS) { out_b->pass = stats->plays[k].mc_q; } else { out_b->tested[stats->plays[k].m] = true; #if USE_AMAF_RAVE out_b->value[stats->plays[k].m] = uct1_rave(&stats->plays[k]); #else out_b->value[stats->plays[k].m] = stats->plays[k].mc_q; #endif } } u16 max_depth = max_depths[0]; for (u16 k = 1; k < MAXIMUM_NUM_THREADS; ++k) { if (max_depths[k] > max_depth) { max_depth = max_depths[k]; } } double wr; if (draws > 0) { wr = ((double)wins) / ((double)(wins + losses)); snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f, draws=%u)\n", simulations, max_depth, wr, draws); } else { wr = ((double)wins) / ((double)simulations); snprintf(s, MAX_PAGE_SIZ, "search finished (sims=%u, depth=%u, wr=%.2f)\n", simulations, max_depth, wr); } flog_info("uct", s); release(s); cfg_board_free(&initial_cfg_board); if (wr < UCT_RESIGN_WINRATE) { return false; } return true; } /* * Reset whether MCTS can run in the background after a previous attempt may * have run out of memory. */ void reset_mcts_can_resume() { mcts_can_resume = true; } /* * Continue a previous MCTS. */ void mcts_resume( const board * b, bool is_black ) { if (!mcts_can_resume) { return; } mcts_init(); u64 stop_time = current_time_in_millis() + 50; ran_out_of_memory = false; search_stop = false; u64 start_zobrist_hash = zobrist_new_hash(b); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, b); #pragma omp parallel for for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); mcts_selection(&cb, start_zobrist_hash, is_black); cfg_board_free(&cb); if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); if (curr_time >= stop_time) { search_stop = true; } } } if (ran_out_of_memory) { mcts_can_resume = false; } cfg_board_free(&initial_cfg_board); } /* * Execute a 1 second MCTS and return the number of simulations ran. 
RETURNS * simulations number */ u32 mcts_benchmark( u32 time_available /* in milliseconds */ ) { mcts_init(); board b; clear_board(&b); u64 curr_time = current_time_in_millis(); u64 stop_time = curr_time + time_available; u64 start_zobrist_hash = zobrist_new_hash(&b); tt_stats *stats = tt_lookup_create(&b, true, start_zobrist_hash); omp_unset_lock(&stats->lock); cfg_board initial_cfg_board; cfg_from_board(&initial_cfg_board, &b); if (stats->expansion_delay != -1) { stats->expansion_delay = -1; init_new_state(stats, &initial_cfg_board, true); } memset(max_depths, 0, sizeof(u16) * MAXIMUM_NUM_THREADS); bool search_stop = false; u32 simulations = 0; /* TODO: do a longer initial run to initialize state */ #pragma omp parallel for for (u32 sim = 0; sim < INT32_MAX; ++sim) { if (search_stop) { /* there is no way to simultaneously cancel all OMP threads */ sim = INT32_MAX; continue; /* TODO: change to break */ } cfg_board cb; cfg_board_clone(&cb, &initial_cfg_board); mcts_selection(&cb, start_zobrist_hash, true); cfg_board_free(&cb); #pragma omp atomic simulations++; if (omp_get_thread_num() == 0) { u64 curr_time = current_time_in_millis(); if (curr_time >= stop_time) { search_stop = true; } } } cfg_board_free(&initial_cfg_board); return simulations; }
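All three search loops above rely on the same cooperative-stop idiom, since a #pragma omp parallel for cannot be aborted from inside (this code predates OpenMP cancellation): one thread raises a shared flag and the remaining iterations degenerate into no-ops. A self-contained C sketch of that idiom; work(), the iteration limits, and the unsynchronized flag and counter reads (tolerated here just as in the original) are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool stop_flag = false;             /* like search_stop above */

static void work(uint32_t sim) { (void)sim; /* one simulation */ }

int main(void) {
    uint32_t done = 0;
#pragma omp parallel for
    for (uint32_t sim = 0; sim < 1000000; ++sim) {
        if (stop_flag) {
            continue;                      /* remaining iterations are no-ops */
        }
        work(sim);
#pragma omp atomic
        done++;
        if (done >= 1000) {                /* stand-in for the time check */
            stop_flag = true;
        }
    }
    printf("%u simulations\n", (unsigned)done);
    return 0;
}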
GB_reduce_build_template.c
//------------------------------------------------------------------------------ // GB_build_template: T=build(S), and assemble any duplicate tuples //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // This template is used in GB_builder and the Generated/GB_red_build__* // workers. This is the same for both vectors and matrices, since this step is // agnostic about which vectors the entries appear. { // k unused for some uses of this template #include "GB_unused.h" if (ndupl == 0) { //---------------------------------------------------------------------- // no duplicates, just permute S into Tx //---------------------------------------------------------------------- // If no duplicates are present, then GB_builder has already // transplanted I_work into T->i, so this step does not need to // construct T->i. The tuple values, in S, are copied or permuted into // T->x. if (K_work == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; for (int64_t t = tstart ; t < tend ; t++) { // Tx [t] = (ttype) S [t] ; with typecast GB_CAST_ARRAY_TO_ARRAY (Tx, t, S, t) ; } } } else { #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; for (int64_t t = tstart ; t < tend ; t++) { // Tx [t] = (ttype) S [K_work [t]] ; with typecast GB_CAST_ARRAY_TO_ARRAY (Tx, t, S, K_work [t]) ; } } } } else { //---------------------------------------------------------------------- // assemble duplicates //---------------------------------------------------------------------- // Entries in S must be copied into T->x, with any duplicates summed // via the operator. T->i must also be constructed. #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnz = tnz_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; // find the first unique tuple owned by this slice int64_t t ; for (t = tstart ; t < tend ; t++) { // get the tuple and break if it is not a duplicate if (I_work [t] >= 0) break ; } // scan all tuples and assemble any duplicates for ( ; t < tend ; t++) { // get the t-th tuple, a unique tuple int64_t i = I_work [t] ; int64_t k = (K_work == NULL) ? t : K_work [t] ; ASSERT (i >= 0) ; // Tx [my_tnz] = S [k] ; with typecast GB_CAST_ARRAY_TO_ARRAY (Tx, my_tnz, S, k) ; Ti [my_tnz] = i ; // assemble all duplicates that follow it. This may assemble // the first duplicates in the next slice(s) (up to but not // including the first unique tuple in the subsequent slice(s)). for ( ; t+1 < nvals && I_work [t+1] < 0 ; t++) { // assemble the duplicate tuple int64_t k = (K_work == NULL) ? (t+1) : K_work [t+1] ; // Tx [my_tnz] += S [k] with typecast GB_ADD_CAST_ARRAY_TO_ARRAY (Tx, my_tnz, S, k) ; } my_tnz++ ; } } } }
// ------------------------------------------------------------------------------ //GB_build_template:T = build(S), and assemble any duplicate tuples // ------------------------------------------------------------------------------ //SuiteSparse:GraphBLAS, Timothy A.Davis, (c) 2017 - 2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS / Doc / License.txt for license . // ------------------------------------------------------------------------------ //This template is used in GB_builder and the Generated / GB_red_build__ * //workers.This is the same for both vectors and matrices, since this step is // agnostic about which vectors the entries appear. { //k unused for some uses of this template #include "GB_unused.h" if (ndupl == 0) { //---------------------------------------------------------------------- //no duplicates, just permute S into Tx // ---------------------------------------------------------------------- //If no duplicates are present, then GB_builder has already // transplanted I_work into T->i, so this step does not need to // construct T->i.The tuple values, in S, are copied or permuted into // T->x. if (K_work == NULL) { for (int tid = 0; tid < nthreads; tid++) { int64_t tstart = tstart_slice[tid]; int64_t tend = tstart_slice[tid + 1]; for (int64_t t = tstart; t < tend; t++) { //Tx[t] = (ttype) S[t]; with typecast GB_CAST_ARRAY_TO_ARRAY(Tx, t, S, t); } } } else { for (int tid = 0; tid < nthreads; tid++) { int64_t tstart = tstart_slice[tid]; int64_t tend = tstart_slice[tid + 1]; for (int64_t t = tstart; t < tend; t++) { //Tx[t] = (ttype) S[K_work[t]]; with typecast GB_CAST_ARRAY_TO_ARRAY(Tx, t, S, K_work[t]); } } } } else { //---------------------------------------------------------------------- //assemble duplicates // ---------------------------------------------------------------------- //Entries in S must be copied into T->x, with any duplicates summed // via the operator.T->i must also be constructed. for (int tid = 0; tid < nthreads; tid++) { int64_t my_tnz = tnz_slice[tid]; int64_t tstart = tstart_slice[tid]; int64_t tend = tstart_slice[tid + 1]; //find the first unique tuple owned by this slice int64_t t; for (t = tstart; t < tend; t++) { //get the tuple and break if it is not a duplicate if (I_work[t] >= 0) break; } //scan all tuples and assemble any duplicates for (; t < tend; t++) { //get the t - th tuple, a unique tuple int64_t i = I_work[t]; int64_t k = (K_work == NULL) ? t : K_work[t]; ASSERT(i >= 0); //Tx[my_tnz] = S[k]; with typecast GB_CAST_ARRAY_TO_ARRAY(Tx, my_tnz, S, k); Ti[my_tnz] = i; //assemble all duplicates that follow it.This may assemble // the first duplicates in the next slice(s) (up to but not // including the first unique tuple in the subsequent slice(s)). for (; t + 1 < nvals && I_work[t + 1] < 0; t++) { //assemble the duplicate tuple int64_t k = (K_work == NULL) ? (t + 1) : K_work[t + 1]; //Tx[my_tnz] += S[k] with typecast GB_ADD_CAST_ARRAY_TO_ARRAY(Tx, my_tnz, S, k); } my_tnz++; } } } }
// ------------------------------------------------------------------------------ //GB_build_template:T = build(S), and assemble any duplicate tuples // ------------------------------------------------------------------------------ //SuiteSparse:GraphBLAS, Timothy A.Davis, (c) 2017 - 2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS / Doc / License.txt for license . // ------------------------------------------------------------------------------ //This template is used in GB_builder and the Generated / GB_red_build__ * //workers.This is the same for both vectors and matrices, since this step is // agnostic about which vectors the entries appear. { //k unused for some uses of this template #include "GB_unused.h" if (ndupl == 0) { //---------------------------------------------------------------------- //no duplicates, just permute S into Tx // ---------------------------------------------------------------------- //If no duplicates are present, then GB_builder has already // transplanted I_work into T->i, so this step does not need to // construct T->i.The tuple values, in S, are copied or permuted into // T->x. if (K_work == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0; tid < nthreads; tid++) { int64_t tstart = tstart_slice[tid]; int64_t tend = tstart_slice[tid + 1]; for (int64_t t = tstart; t < tend; t++) { //Tx[t] = (ttype) S[t]; with typecast GB_CAST_ARRAY_TO_ARRAY(Tx, t, S, t); } } } else { #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0; tid < nthreads; tid++) { int64_t tstart = tstart_slice[tid]; int64_t tend = tstart_slice[tid + 1]; for (int64_t t = tstart; t < tend; t++) { //Tx[t] = (ttype) S[K_work[t]]; with typecast GB_CAST_ARRAY_TO_ARRAY(Tx, t, S, K_work[t]); } } } } else { //---------------------------------------------------------------------- //assemble duplicates // ---------------------------------------------------------------------- //Entries in S must be copied into T->x, with any duplicates summed // via the operator.T->i must also be constructed. #pragma omp parallel for num_threads(nthreads) schedule(static) for (int tid = 0; tid < nthreads; tid++) { int64_t my_tnz = tnz_slice[tid]; int64_t tstart = tstart_slice[tid]; int64_t tend = tstart_slice[tid + 1]; //find the first unique tuple owned by this slice int64_t t; for (t = tstart; t < tend; t++) { //get the tuple and break if it is not a duplicate if (I_work[t] >= 0) break; } //scan all tuples and assemble any duplicates for (; t < tend; t++) { //get the t - th tuple, a unique tuple int64_t i = I_work[t]; int64_t k = (K_work == NULL) ? t : K_work[t]; ASSERT(i >= 0); //Tx[my_tnz] = S[k]; with typecast GB_CAST_ARRAY_TO_ARRAY(Tx, my_tnz, S, k); Ti[my_tnz] = i; //assemble all duplicates that follow it.This may assemble // the first duplicates in the next slice(s) (up to but not // including the first unique tuple in the subsequent slice(s)). for (; t + 1 < nvals && I_work[t + 1] < 0; t++) { //assemble the duplicate tuple int64_t k = (K_work == NULL) ? (t + 1) : K_work[t + 1]; //Tx[my_tnz] += S[k] with typecast GB_ADD_CAST_ARRAY_TO_ARRAY(Tx, my_tnz, S, k); } my_tnz++; } } } }
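/* The template above uses a slice-per-thread pattern: the outer loop has
 * exactly nthreads iterations, schedule(static) maps iteration tid to thread
 * tid, and each thread walks a precomputed contiguous range, so slices never
 * interleave. A minimal self-contained sketch of the same pattern; the names
 * (copy_sliced, slices) are illustrative, and plain assignment stands in for
 * the GB_CAST_ARRAY_TO_ARRAY typecast. */
#include <stdint.h>

static void copy_sliced(double *Tx, const double *S, int nthreads,
                        const int64_t *tstart_slice /* nthreads+1 boundaries */) {
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int tid = 0; tid < nthreads; tid++) {
        for (int64_t t = tstart_slice[tid]; t < tstart_slice[tid + 1]; t++) {
            Tx[t] = S[t];   /* Tx [t] = (ttype) S [t] in the real template */
        }
    }
}

int main(void) {
    double S[8] = {1, 2, 3, 4, 5, 6, 7, 8}, Tx[8];
    int64_t slices[3] = {0, 4, 8};   /* boundaries for 2 threads */
    copy_sliced(Tx, S, 2, slices);
    return 0;
}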
cycles.h
/* * Copyright 2018-2021 Kyle Berney * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CYCLES_H #define CYCLES_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <omp.h> #include <math.h> #include "params.h" //Performs the equidistant gather on r root elements and l leaf elements //Assumes r <= l template<typename TYPE> void equidistant_gather(TYPE *A, uint64_t r, uint64_t l) { for (uint64_t i = 0; i < r; ++i) { //for each of the r cycles TYPE temp = A[(i+1)*(l+1) - 1]; //i-th root element for (uint64_t j = (i+1)*(l+1) - 1; j > i; j -= l) { A[j] = A[j-l]; } A[i] = temp; } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0-indexed //right shift of r - i or left shift of l - (r - i) //A[r + i*l] to A[r + (i+1)*l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right<TYPE>(&A[r + i*l], l, kr); } else if (kl != 0 && kl != l) { shift_left<TYPE>(&A[r + i*l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements using p threads //Assumes r <= l template<typename TYPE> void equidistant_gather_parallel(TYPE *A, uint64_t r, uint64_t l, uint32_t p) { #pragma omp parallel for shared(A, r, l) schedule(guided, B) num_threads(p) for (uint64_t i = 0; i < r; ++i) { //for each of the r cycles TYPE temp = A[(i+1)*(l+1) - 1]; //i-th root element for (uint64_t j = (i+1)*(l+1) - 1; j > i; j -= l) { A[j] = A[j-l]; } A[i] = temp; } #pragma omp parallel for shared(A, r, l) schedule(guided) num_threads(p) for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0-indexed //right shift of r - i or left shift of l - (r - i) //A[r + i*l] to A[r + (i+1)*l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right<TYPE>(&A[r + i*l], l, kr); } else if (kl != 0 && kl != l) { shift_left<TYPE>(&A[r + i*l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements //Executes min(r, B) cycles at once //Assumes r <= l template<typename TYPE> void equidistant_gather_io(TYPE *A, uint64_t r, uint64_t l) { uint32_t b = (r < B) ? 
r : B; for (uint64_t i = 0; i < r; i += b) { //perform b cycles simultaneously if (i > r - b) { //last chunk may be incomplete b = r - (r/b)*b; } TYPE temp[b]; //Load all root elements into temp for (uint64_t j = 0; j < b; ++j) { temp[j] = A[(i+j+1)*(l+1) - 1]; //(i+j)-th root element } //(b - 1) steps, not all cycles "participate" uint64_t idx = (i+b)*(l+1) - 1; for (uint64_t k = 1; k <= b - 1; ++k) { for (uint64_t j = 0; j < k; ++j) { //k cycles "participate" A[idx - j] = A[idx - j - l]; } idx -= l; } //Remainder (i + 2) steps, all cycles "participate" for (; idx > i+b-1; idx -= l) { for (uint64_t j = 0; j < b; ++j) { A[idx - j] = A[idx - j - l]; } } //Last step, write root elements to desired location for (uint64_t j = 0; j < b; ++j) { A[i + j] = temp[j]; } } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0-indexed //right shift of r - i or left shift of l - (r - i) //A[r + i*l] to A[r + (i+1)*l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right<TYPE>(&A[r + i*l], l, kr); } else if (kl != 0 && kl != l) { shift_left<TYPE>(&A[r + i*l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements using p threads //Each thread executes min(r, B) cycles at once //Assumes r <= l template<typename TYPE> void equidistant_gather_io_parallel(TYPE *A, uint64_t r, uint64_t l, uint32_t p) { #pragma omp parallel for shared(A, r, l, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < r; i += B) { //perform b cycles simultaneously uint32_t b = (r < B) ? r : B; if (i > r - b) { //last chunk may be incomplete b = r - (r/b)*b; } TYPE temp[b]; //Load all root elements into temp for (uint64_t j = 0; j < b; ++j) { temp[j] = A[(i+j+1)*(l+1) - 1]; //(i+j)-th root element } //(b - 1) steps, not all cycles "participate" uint64_t idx = (i+b)*(l+1) - 1; for (uint64_t k = 1; k <= b - 1; ++k) { for (uint64_t j = 0; j < k; ++j) { //k cycles "participate" A[idx - j] = A[idx - j - l]; } idx -= l; } //Remainder (i + 2) steps, all cycles "participate" for (; idx > i+b-1; idx -= l) { for (uint64_t j = 0; j < b; ++j) { A[idx - j] = A[idx - j - l]; } } //Last step, write root elements to desired location for (uint64_t j = 0; j < b; ++j) { A[i + j] = temp[j]; } } #pragma omp parallel for shared(A, r, l, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0-indexed //right shift of r - i or left shift of l - (r - i) //A[r + i*l] to A[r + (i+1)*l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right<TYPE>(&A[r + i*l], l, kr); } else if (kl != 0 && kl != l) { shift_left<TYPE>(&A[r + i*l], l, kl); } } } //Performs the equidistant gather on m root elements and m leaf elements in chunks of size c template<typename TYPE> void equidistant_gather_chunks(TYPE *A, uint64_t m, uint64_t c) { for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? 
B : c; TYPE temp[b]; for (uint64_t j = 0; j < c; j += b) { if (j > c - b) { //last chunk may be incomplete b = c - (c/b)*b; } for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i+1)*(m+1) - 1)*c + j + k]; } for (uint64_t k = ((i+1)*(m+1) - 1)*c; k > i*c; k -= m*c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m*c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i*c + j + k] = temp[k]; } } } for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i)*c; uint64_t kl = i*c; if (kr <= kl && kr != 0 && kr != m*c) { shift_right<TYPE>(&A[(i+1)*m*c], m*c, kr); } else if (kl != 0 && kl != m*c) { shift_left<TYPE>(&A[(i+1)*m*c], m*c, kl); } } } //Performs the equidistant gather on m root elements and m leaf elements in chunks of size c //Assumes chunk size is larger than B template<typename TYPE> void equidistant_gather_chunks_parallel(TYPE *A, uint64_t m, uint64_t c, uint32_t p) { if (p <= m) { #pragma omp parallel for shared(A, m, c, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? B : c; TYPE temp[b]; for (uint64_t j = 0; j < c; j += b) { if (j > c - b) { //last chunk may be incomplete b = c - (c/b)*b; } for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i+1)*(m+1) - 1)*c + j + k]; } for (uint64_t k = ((i+1)*(m+1) - 1)*c; k > i*c; k -= m*c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m*c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i*c + j + k] = temp[k]; } } } #pragma omp parallel for shared(A, m, c, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i)*c; uint64_t kl = i*c; if (kr <= kl && kr != 0 && kr != m*c) { shift_right<TYPE>(&A[(i+1)*m*c], m*c, kr); } else if (kl != 0 && kl != m*c) { shift_left<TYPE>(&A[(i+1)*m*c], m*c, kl); } } } else { //p > m; i.e., more processors than cycles uint32_t threads_per = ceil(p/(double)m); #pragma omp parallel for shared(A, m, c, p, threads_per) num_threads(m) for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? 
B : c; TYPE temp[b]; uint32_t remainder = c % b; if (remainder == 0) { #pragma omp parallel for shared(A, m, c, p, threads_per, i, b) private(temp) schedule(guided) num_threads(threads_per) for (uint64_t j = 0; j < c; j += b) { for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i+1)*(m+1) - 1)*c + j + k]; } for (uint64_t k = ((i+1)*(m+1) - 1)*c; k > i*c; k -= m*c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m*c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i*c + j + k] = temp[k]; } } } else { #pragma omp parallel for shared(A, m, c, p, threads_per, i, b, remainder) private(temp) schedule(guided) num_threads(threads_per) for (uint64_t j = 0; j < c - remainder; j += b) { for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i+1)*(m+1) - 1)*c + j + k]; } for (uint64_t k = ((i+1)*(m+1) - 1)*c; k > i*c; k -= m*c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m*c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i*c + j + k] = temp[k]; } } //Last block is size remainder uint64_t j = c - remainder; for (uint32_t k = 0; k < remainder; ++k) { temp[k] = A[((i+1)*(m+1) - 1)*c + j + k]; } for (uint64_t k = ((i+1)*(m+1) - 1)*c; k > i*c; k -= m*c) { for (uint32_t x = 0; x < remainder; ++x) { A[k + j + x] = A[k - m*c + j + x]; } } for (uint32_t k = 0; k < remainder; ++k) { A[i*c + j + k] = temp[k]; } } } #pragma omp parallel for shared(A, m, c, p) schedule(guided) num_threads(m) for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i)*c; uint64_t kl = i*c; if (kr <= kl && kr != 0 && kr != m*c) { shift_right_parallel<TYPE>(&A[(i+1)*m*c], m*c, kr, threads_per); } else if (kl != 0 && kl != m*c) { shift_left_parallel<TYPE>(&A[(i+1)*m*c], m*c, kl, threads_per); } } } } //Performs the extended equidistant gather for n = (b+1)^d - 1, where d is an arbitrary integer template<typename TYPE> void extended_equidistant_gather(TYPE *A, uint64_t n, uint64_t b) { uint64_t m = n/(b+1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io<TYPE>(A, m, m); } else { //recurse on (b+1) partitions for (uint64_t i = 0; i < b+1; ++i) { extended_equidistant_gather<TYPE>(&A[i*m + i], m, b); } //merge partitions via equidistant gather of chunks of size c = ceil{m/(B+1)} on &A[c-1] uint64_t c = ceil(m / (double)(b+1)); equidistant_gather_chunks<TYPE>(&A[c-1], b, c); } } //Performs the extended equidistant gather for n = (b+1)^d - 1, where d is an arbitrary integer template<typename TYPE> void extended_equidistant_gather_parallel(TYPE *A, uint64_t n, uint64_t b, uint32_t p) { uint64_t m = n/(b+1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io_parallel<TYPE>(A, m, m, p); } else { if (p <= b+1) { //recurse on (b+1) partitions #pragma omp parallel for shared(A, n, b, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < b+1; ++i) { extended_equidistant_gather<TYPE>(&A[i*m + i], m, b); } } else { uint32_t threads_per = ceil(p/(double)(b+1)); //recurse on (b+1) partitions #pragma omp parallel for shared(A, n, b, p) schedule(guided) num_threads(b+1) for (uint64_t i = 0; i < b+1; ++i) { extended_equidistant_gather_parallel<TYPE>(&A[i*m + i], m, b, threads_per); } } //merge partitions via equidistant gather of chunks of size c = ceil{m/(B+1)} on &A[c-1] uint64_t c = ceil(m / (double)(b+1)); equidistant_gather_chunks_parallel<TYPE>(&A[c-1], b, c, p); } } //Performs the extended equidistant gather for n = m(b+1), where m = n/(b+1) template<typename TYPE> void extended_equidistant_gather2(TYPE *A, 
uint64_t n, uint64_t b) { uint64_t m = n/(b+1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io<TYPE>(A, m, b); } else { uint64_t r = m % (b+1); if (r == 0) { //recurse on (b+1) partitions for (uint64_t i = 0; i < b+1; ++i) { extended_equidistant_gather2<TYPE>(&A[i*m], m, b); } //merge partitions via equidistant gather of chunks of size c = m/(B+1) on &A[c] uint64_t c = m/(b+1); equidistant_gather_chunks<TYPE>(&A[c], (m - c)/c, c); } else { uint64_t size = r * (b+1); extended_equidistant_gather2<TYPE>(A, n - size, b); extended_equidistant_gather2<TYPE>(&A[n - size], size, b); shift_right<TYPE>(&A[m-r], (m-r)*b + r, r); } } } //Performs the extended equidistant gather for n = m(b+1), where m = n/(b+1) template<typename TYPE> void extended_equidistant_gather2_parallel(TYPE *A, uint64_t n, uint64_t b, uint32_t p) { uint64_t m = n/(b+1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io_parallel<TYPE>(A, m, b, p); } else { uint64_t r = m % (b+1); if (r == 0) { if (p <= b+1) { //recurse on (b+1) partitions #pragma omp parallel for shared(A, n, b, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < b+1; ++i) { extended_equidistant_gather2<TYPE>(&A[i*m], m, b); } } else { uint32_t threads_per = ceil(p/(double)(b+1)); //recurse on (b+1) partitions #pragma omp parallel for shared(A, n, b, p, threads_per) num_threads(b+1) for (uint64_t i = 0; i < b+1; ++i) { extended_equidistant_gather2_parallel<TYPE>(&A[i*m], m, b, threads_per); } } //merge partitions via equidistant gather of chunks of size c = m/(B+1) on &A[c] uint64_t c = m/(b+1); equidistant_gather_chunks_parallel<TYPE>(&A[c], (m - c)/c, c, p); } else { uint64_t size = r * (b+1); //Parallel Solution #1 extended_equidistant_gather2_parallel<TYPE>(A, n - size, b, p); extended_equidistant_gather2_parallel<TYPE>(&A[n - size], size, b, p); /*//Parallel Solution #2 if (p > 2) { #pragma omp parallel sections num_threads(2) { #pragma omp section { //extended_equidistant_gather2_parallel<TYPE>(A, n - size, b, ceil(p/2.)); extended_equidistant_gather2_parallel<TYPE>(A, n - size, b, ceil((p/(double)n)*(n - size))); } #pragma omp section { //extended_equidistant_gather2_parallel<TYPE>(&A[n - size], size, b, ceil(p/2.)); extended_equidistant_gather2_parallel<TYPE>(&A[n - size], size, b, ceil((p/(double)n)*size)); } } } else { #pragma omp parallel sections num_threads(2) { #pragma omp section { extended_equidistant_gather2<TYPE>(A, n - size, b); } #pragma omp section { extended_equidistant_gather2<TYPE>(&A[n - size], size, b); } } }*/ shift_right_parallel<TYPE>(&A[m-r], (m-r)*b + r, r, p); } } } #endif
#ifndef CYCLES_H #define CYCLES_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <omp.h> #include <math.h> #include "params.h" // Performs the equidistant gather on r root elements and l leaf elements // Assumes r <= l template < typename TYPE > void equidistant_gather(TYPE * A, uint64_t r, uint64_t l) { for (uint64_t i = 0; i < r; ++i) { //for each of the r cycles TYPE temp = A[(i + 1) * (l + 1) - 1]; //i - th root element for (uint64_t j = (i + 1) * (l + 1) - 1; j > i; j -= l) { A[j] = A[j - l]; } A[i] = temp; } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements using p threads // Assumes r <= l template < typename TYPE > void equidistant_gather_parallel(TYPE * A, uint64_t r, uint64_t l, uint32_t p) { for (uint64_t i = 0; i < r; ++i) { //for each of the r cycles TYPE temp = A[(i + 1) * (l + 1) - 1]; //i - th root element for (uint64_t j = (i + 1) * (l + 1) - 1; j > i; j -= l) { A[j] = A[j - l]; } A[i] = temp; } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements // Executes min(r, B) cycles at once // Assumes r <= l template < typename TYPE > void equidistant_gather_io(TYPE * A, uint64_t r, uint64_t l) { uint32_t b = (r < B) ? r : B; for (uint64_t i = 0; i < r; i += b) { //perform b cycles simultaneously if (i > r - b) { //last chunk may be incomplete b = r - (r / b) * b; } TYPE temp[b]; //Load all root elements into temp for (uint64_t j = 0; j < b; ++j) { temp[j] = A[(i + j + 1) * (l + 1) - 1]; //(i + j) - th root element } //(b - 1) steps, not all cycles "participate" uint64_t idx = (i + b) * (l + 1) - 1; for (uint64_t k = 1; k <= b - 1; ++k) { for (uint64_t j = 0; j < k; ++j) { //k cycles "participate" A[idx - j] = A[idx - j - l]; } idx -= l; } //Remainder(i + 2) steps, all cycles "participate" for (; idx > i + b - 1; idx -= l) { for (uint64_t j = 0; j < b; ++j) { A[idx - j] = A[idx - j - l]; } } //Last step, write root elements to desired location for (uint64_t j = 0; j < b; ++j) { A[i + j] = temp[j]; } } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements using p threads // Each thread executes min(r, B) cycles at once // Assumes r <= l template < typename TYPE > void equidistant_gather_io_parallel(TYPE * A, uint64_t r, uint64_t l, uint32_t p) { for (uint64_t i = 0; i < r; i += B) { //perform b cycles simultaneously uint32_t b = (r < B) ? 
r : B; if (i > r - b) { //last chunk may be incomplete b = r - (r / b) * b; } TYPE temp[b]; //Load all root elements into temp for (uint64_t j = 0; j < b; ++j) { temp[j] = A[(i + j + 1) * (l + 1) - 1]; //(i + j) - th root element } //(b - 1) steps, not all cycles "participate" uint64_t idx = (i + b) * (l + 1) - 1; for (uint64_t k = 1; k <= b - 1; ++k) { for (uint64_t j = 0; j < k; ++j) { //k cycles "participate" A[idx - j] = A[idx - j - l]; } idx -= l; } //Remainder(i + 2) steps, all cycles "participate" for (; idx > i + b - 1; idx -= l) { for (uint64_t j = 0; j < b; ++j) { A[idx - j] = A[idx - j - l]; } } //Last step, write root elements to desired location for (uint64_t j = 0; j < b; ++j) { A[i + j] = temp[j]; } } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on m root elements and m leaf elements in chunks of size c template < typename TYPE > void equidistant_gather_chunks(TYPE * A, uint64_t m, uint64_t c) { for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? B : c; TYPE temp[b]; for (uint64_t j = 0; j < c; j += b) { if (j > c - b) { //last chunk may be incomplete b = c - (c / b) * b; } for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } } for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i) * c; uint64_t kl = i * c; if (kr <= kl && kr != 0 && kr != m * c) { shift_right < TYPE > (&A[(i + 1) * m * c], m * c, kr); } else if (kl != 0 && kl != m * c) { shift_left < TYPE > (&A[(i + 1) * m * c], m * c, kl); } } } //Performs the equidistant gather on m root elements and m leaf elements in chunks of size c // Assumes chunk size is larger than B template < typename TYPE > void equidistant_gather_chunks_parallel(TYPE * A, uint64_t m, uint64_t c, uint32_t p) { if (p <= m) { for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? B : c; TYPE temp[b]; for (uint64_t j = 0; j < c; j += b) { if (j > c - b) { //last chunk may be incomplete b = c - (c / b) * b; } for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } } for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i) * c; uint64_t kl = i * c; if (kr <= kl && kr != 0 && kr != m * c) { shift_right < TYPE > (&A[(i + 1) * m * c], m * c, kr); } else if (kl != 0 && kl != m * c) { shift_left < TYPE > (&A[(i + 1) * m * c], m * c, kl); } } } else { //p > m; i.e., more processors than cycles uint32_t threads_per = ceil(p / (double)m); for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? 
B : c; TYPE temp[b]; uint32_t remainder = c % b; if (remainder == 0) { for (uint64_t j = 0; j < c; j += b) { for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } } else { for (uint64_t j = 0; j < c - remainder; j += b) { for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } //Last block is size remainder uint64_t j = c - remainder; for (uint32_t k = 0; k < remainder; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < remainder; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < remainder; ++k) { A[i * c + j + k] = temp[k]; } } } for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i) * c; uint64_t kl = i * c; if (kr <= kl && kr != 0 && kr != m * c) { shift_right_parallel < TYPE > (&A[(i + 1) * m * c], m * c, kr, threads_per); } else if (kl != 0 && kl != m * c) { shift_left_parallel < TYPE > (&A[(i + 1) * m * c], m * c, kl, threads_per); } } } } //Performs the extended equidistant gather for n = (b + 1) ^ d - 1, where d is an arbitrary integer template < typename TYPE > void extended_equidistant_gather(TYPE * A, uint64_t n, uint64_t b) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io < TYPE > (A, m, m); } else { //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather < TYPE > (&A[i * m + i], m, b); } //merge partitions via equidistant gather of chunks of size c = ceil { m / (B + 1) } on & A[c - 1] uint64_t c = ceil(m / (double)(b + 1)); equidistant_gather_chunks < TYPE > (&A[c - 1], b, c); } } //Performs the extended equidistant gather for n = (b + 1) ^ d - 1, where d is an arbitrary integer template < typename TYPE > void extended_equidistant_gather_parallel(TYPE * A, uint64_t n, uint64_t b, uint32_t p) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io_parallel < TYPE > (A, m, m, p); } else { if (p <= b + 1) { //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather < TYPE > (&A[i * m + i], m, b); } } else { uint32_t threads_per = ceil(p / (double)(b + 1)); //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather_parallel < TYPE > (&A[i * m + i], m, b, threads_per); } } //merge partitions via equidistant gather of chunks of size c = ceil { m / (B + 1) } on & A[c - 1] uint64_t c = ceil(m / (double)(b + 1)); equidistant_gather_chunks_parallel < TYPE > (&A[c - 1], b, c, p); } } //Performs the extended equidistant gather for n = m(b + 1), where m = n / (b + 1) template < typename TYPE > void extended_equidistant_gather2(TYPE * A, uint64_t n, uint64_t b) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io < TYPE > (A, m, b); } else { uint64_t r = m % (b + 1); if (r == 0) { //recurse on(b + 1) partitions for 
(uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather2 < TYPE > (&A[i * m], m, b); } //merge partitions via equidistant gather of chunks of size c = m / (B + 1) on & A[c] uint64_t c = m / (b + 1); equidistant_gather_chunks < TYPE > (&A[c], (m - c) / c, c); } else { uint64_t size = r * (b + 1); extended_equidistant_gather2 < TYPE > (A, n - size, b); extended_equidistant_gather2 < TYPE > (&A[n - size], size, b); shift_right < TYPE > (&A[m - r], (m - r) * b + r, r); } } } //Performs the extended equidistant gather for n = m(b + 1), where m = n / (b + 1) template < typename TYPE > void extended_equidistant_gather2_parallel(TYPE * A, uint64_t n, uint64_t b, uint32_t p) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io_parallel < TYPE > (A, m, b, p); } else { uint64_t r = m % (b + 1); if (r == 0) { if (p <= b + 1) { //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather2 < TYPE > (&A[i * m], m, b); } } else { uint32_t threads_per = ceil(p / (double)(b + 1)); //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather2_parallel < TYPE > (&A[i * m], m, b, threads_per); } } //merge partitions via equidistant gather of chunks of size c = m / (B + 1) on & A[c] uint64_t c = m / (b + 1); equidistant_gather_chunks_parallel < TYPE > (&A[c], (m - c) / c, c, p); } else { uint64_t size = r * (b + 1); //Parallel Solution #1 extended_equidistant_gather2_parallel < TYPE > (A, n - size, b, p); extended_equidistant_gather2_parallel < TYPE > (&A[n - size], size, b, p); /* * //Parallel Solution #2 if (p > 2) { #pragma omp section { * //extended_equidistant_gather2_parallel<TYPE>(A, n - size, b, * ceil(p/2.)); extended_equidistant_gather2_parallel<TYPE>(A, n * - size, b, ceil((p/(double)n)*(n - size))); } * //extended_equidistant_gather2_parallel<TYPE>(&A[n - size], * size, b, ceil(p/2.)); * extended_equidistant_gather2_parallel<TYPE>(&A[n - size], * size, b, ceil((p/(double)n)*size)); * * * } else { #pragma omp section { * extended_equidistant_gather2<TYPE>(A, n - size, b); } * extended_equidistant_gather2<TYPE>(&A[n - size], size, b); * * * } */ shift_right_parallel < TYPE > (&A[m - r], (m - r) * b + r, r, p); } } }
#ifndef CYCLES_H #define CYCLES_H #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <omp.h> #include <math.h> #include "params.h" // Performs the equidistant gather on r root elements and l leaf elements // Assumes r <= l template < typename TYPE > void equidistant_gather(TYPE * A, uint64_t r, uint64_t l) { for (uint64_t i = 0; i < r; ++i) { //for each of the r cycles TYPE temp = A[(i + 1) * (l + 1) - 1]; //i - th root element for (uint64_t j = (i + 1) * (l + 1) - 1; j > i; j -= l) { A[j] = A[j - l]; } A[i] = temp; } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements using p threads // Assumes r <= l template < typename TYPE > void equidistant_gather_parallel(TYPE * A, uint64_t r, uint64_t l, uint32_t p) { #pragma omp parallel for shared(A, r, l) schedule(guided, B) num_threads(p) for (uint64_t i = 0; i < r; ++i) { //for each of the r cycles TYPE temp = A[(i + 1) * (l + 1) - 1]; //i - th root element for (uint64_t j = (i + 1) * (l + 1) - 1; j > i; j -= l) { A[j] = A[j - l]; } A[i] = temp; } #pragma omp parallel for shared(A, r, l) schedule(guided) num_threads(p) for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements // Executes min(r, B) cycles at once // Assumes r <= l template < typename TYPE > void equidistant_gather_io(TYPE * A, uint64_t r, uint64_t l) { uint32_t b = (r < B) ? 
r : B; for (uint64_t i = 0; i < r; i += b) { //perform b cycles simultaneously if (i > r - b) { //last chunk may be incomplete b = r - (r / b) * b; } TYPE temp[b]; //Load all root elements into temp for (uint64_t j = 0; j < b; ++j) { temp[j] = A[(i + j + 1) * (l + 1) - 1]; //(i + j) - th root element } //(b - 1) steps, not all cycles "participate" uint64_t idx = (i + b) * (l + 1) - 1; for (uint64_t k = 1; k <= b - 1; ++k) { for (uint64_t j = 0; j < k; ++j) { //k cycles "participate" A[idx - j] = A[idx - j - l]; } idx -= l; } //Remainder(i + 2) steps, all cycles "participate" for (; idx > i + b - 1; idx -= l) { for (uint64_t j = 0; j < b; ++j) { A[idx - j] = A[idx - j - l]; } } //Last step, write root elements to desired location for (uint64_t j = 0; j < b; ++j) { A[i + j] = temp[j]; } } for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on r root elements and l leaf elements using p threads // Each thread executes min(r, B) cycles at once // Assumes r <= l template < typename TYPE > void equidistant_gather_io_parallel(TYPE * A, uint64_t r, uint64_t l, uint32_t p) { #pragma omp parallel for shared(A, r, l, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < r; i += B) { //perform b cycles simultaneously uint32_t b = (r < B) ? r : B; if (i > r - b) { //last chunk may be incomplete b = r - (r / b) * b; } TYPE temp[b]; //Load all root elements into temp for (uint64_t j = 0; j < b; ++j) { temp[j] = A[(i + j + 1) * (l + 1) - 1]; //(i + j) - th root element } //(b - 1) steps, not all cycles "participate" uint64_t idx = (i + b) * (l + 1) - 1; for (uint64_t k = 1; k <= b - 1; ++k) { for (uint64_t j = 0; j < k; ++j) { //k cycles "participate" A[idx - j] = A[idx - j - l]; } idx -= l; } //Remainder(i + 2) steps, all cycles "participate" for (; idx > i + b - 1; idx -= l) { for (uint64_t j = 0; j < b; ++j) { A[idx - j] = A[idx - j - l]; } } //Last step, write root elements to desired location for (uint64_t j = 0; j < b; ++j) { A[i + j] = temp[j]; } } #pragma omp parallel for shared(A, r, l, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < r; ++i) { //leaf subtrees are 0 - indexed // right shift of r - i or left shift of l - (r - i) // A[r + i * l] to A[r + (i + 1) * l - 1] uint64_t kr = r - i; uint64_t kl = l - r + i; if (kr <= kl && kr != 0 && kr != l) { shift_right < TYPE > (&A[r + i * l], l, kr); } else if (kl != 0 && kl != l) { shift_left < TYPE > (&A[r + i * l], l, kl); } } } //Performs the equidistant gather on m root elements and m leaf elements in chunks of size c template < typename TYPE > void equidistant_gather_chunks(TYPE * A, uint64_t m, uint64_t c) { for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? 
B : c; TYPE temp[b]; for (uint64_t j = 0; j < c; j += b) { if (j > c - b) { //last chunk may be incomplete b = c - (c / b) * b; } for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } } for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i) * c; uint64_t kl = i * c; if (kr <= kl && kr != 0 && kr != m * c) { shift_right < TYPE > (&A[(i + 1) * m * c], m * c, kr); } else if (kl != 0 && kl != m * c) { shift_left < TYPE > (&A[(i + 1) * m * c], m * c, kl); } } } //Performs the equidistant gather on m root elements and m leaf elements in chunks of size c // Assumes chunk size is larger than B template < typename TYPE > void equidistant_gather_chunks_parallel(TYPE * A, uint64_t m, uint64_t c, uint32_t p) { if (p <= m) { #pragma omp parallel for shared(A, m, c, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? B : c; TYPE temp[b]; for (uint64_t j = 0; j < c; j += b) { if (j > c - b) { //last chunk may be incomplete b = c - (c / b) * b; } for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } } #pragma omp parallel for shared(A, m, c, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i) * c; uint64_t kl = i * c; if (kr <= kl && kr != 0 && kr != m * c) { shift_right < TYPE > (&A[(i + 1) * m * c], m * c, kr); } else if (kl != 0 && kl != m * c) { shift_left < TYPE > (&A[(i + 1) * m * c], m * c, kl); } } } else { //p > m; i.e., more processors than cycles uint32_t threads_per = ceil(p / (double)m); #pragma omp parallel for shared(A, m, c, p, threads_per) num_threads(m) for (uint64_t i = 0; i < m; ++i) { uint32_t b = (B < c) ? 
B : c; TYPE temp[b]; uint32_t remainder = c % b; if (remainder == 0) { #pragma omp parallel for shared(A, m, c, p, threads_per, i, b) private(temp) schedule(guided) num_threads(threads_per) for (uint64_t j = 0; j < c; j += b) { for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } } else { #pragma omp parallel for shared(A, m, c, p, threads_per, i, b, remainder) private(temp) schedule(guided) num_threads(threads_per) for (uint64_t j = 0; j < c - remainder; j += b) { for (uint32_t k = 0; k < b; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < b; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < b; ++k) { A[i * c + j + k] = temp[k]; } } //Last block is size remainder uint64_t j = c - remainder; for (uint32_t k = 0; k < remainder; ++k) { temp[k] = A[((i + 1) * (m + 1) - 1) * c + j + k]; } for (uint64_t k = ((i + 1) * (m + 1) - 1) * c; k > i * c; k -= m * c) { for (uint32_t x = 0; x < remainder; ++x) { A[k + j + x] = A[k - m * c + j + x]; } } for (uint32_t k = 0; k < remainder; ++k) { A[i * c + j + k] = temp[k]; } } } #pragma omp parallel for shared(A, m, c, p) schedule(guided) num_threads(m) for (uint64_t i = 0; i < m; ++i) { uint64_t kr = (m - i) * c; uint64_t kl = i * c; if (kr <= kl && kr != 0 && kr != m * c) { shift_right_parallel < TYPE > (&A[(i + 1) * m * c], m * c, kr, threads_per); } else if (kl != 0 && kl != m * c) { shift_left_parallel < TYPE > (&A[(i + 1) * m * c], m * c, kl, threads_per); } } } } //Performs the extended equidistant gather for n = (b + 1) ^ d - 1, where d is an arbitrary integer template < typename TYPE > void extended_equidistant_gather(TYPE * A, uint64_t n, uint64_t b) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io < TYPE > (A, m, m); } else { //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather < TYPE > (&A[i * m + i], m, b); } //merge partitions via equidistant gather of chunks of size c = ceil { m / (B + 1) } on & A[c - 1] uint64_t c = ceil(m / (double)(b + 1)); equidistant_gather_chunks < TYPE > (&A[c - 1], b, c); } } //Performs the extended equidistant gather for n = (b + 1) ^ d - 1, where d is an arbitrary integer template < typename TYPE > void extended_equidistant_gather_parallel(TYPE * A, uint64_t n, uint64_t b, uint32_t p) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io_parallel < TYPE > (A, m, m, p); } else { if (p <= b + 1) { //recurse on(b + 1) partitions #pragma omp parallel for shared(A, n, b, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather < TYPE > (&A[i * m + i], m, b); } } else { uint32_t threads_per = ceil(p / (double)(b + 1)); //recurse on(b + 1) partitions #pragma omp parallel for shared(A, n, b, p) schedule(guided) num_threads(b+1) for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather_parallel < TYPE > (&A[i * m + i], m, b, threads_per); } } //merge partitions via equidistant gather of chunks of size c = ceil { m / (B + 1) } on & A[c - 1] uint64_t c = ceil(m / (double)(b + 1)); 
equidistant_gather_chunks_parallel < TYPE > (&A[c - 1], b, c, p); } } //Performs the extended equidistant gather for n = m(b + 1), where m = n / (b + 1) template < typename TYPE > void extended_equidistant_gather2(TYPE * A, uint64_t n, uint64_t b) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io < TYPE > (A, m, b); } else { uint64_t r = m % (b + 1); if (r == 0) { //recurse on(b + 1) partitions for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather2 < TYPE > (&A[i * m], m, b); } //merge partitions via equidistant gather of chunks of size c = m / (B + 1) on & A[c] uint64_t c = m / (b + 1); equidistant_gather_chunks < TYPE > (&A[c], (m - c) / c, c); } else { uint64_t size = r * (b + 1); extended_equidistant_gather2 < TYPE > (A, n - size, b); extended_equidistant_gather2 < TYPE > (&A[n - size], size, b); shift_right < TYPE > (&A[m - r], (m - r) * b + r, r); } } } //Performs the extended equidistant gather for n = m(b + 1), where m = n / (b + 1) template < typename TYPE > void extended_equidistant_gather2_parallel(TYPE * A, uint64_t n, uint64_t b, uint32_t p) { uint64_t m = n / (b + 1); //number of internal elements if (m <= b) { //base case: perform equidistant gather equidistant_gather_io_parallel < TYPE > (A, m, b, p); } else { uint64_t r = m % (b + 1); if (r == 0) { if (p <= b + 1) { //recurse on(b + 1) partitions #pragma omp parallel for shared(A, n, b, p) schedule(guided) num_threads(p) for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather2 < TYPE > (&A[i * m], m, b); } } else { uint32_t threads_per = ceil(p / (double)(b + 1)); //recurse on(b + 1) partitions #pragma omp parallel for shared(A, n, b, p, threads_per) num_threads(b+1) for (uint64_t i = 0; i < b + 1; ++i) { extended_equidistant_gather2_parallel < TYPE > (&A[i * m], m, b, threads_per); } } //merge partitions via equidistant gather of chunks of size c = m / (B + 1) on & A[c] uint64_t c = m / (b + 1); equidistant_gather_chunks_parallel < TYPE > (&A[c], (m - c) / c, c, p); } else { uint64_t size = r * (b + 1); //Parallel Solution #1 extended_equidistant_gather2_parallel < TYPE > (A, n - size, b, p); extended_equidistant_gather2_parallel < TYPE > (&A[n - size], size, b, p); /* * //Parallel Solution #2 if (p > 2) { #pragma omp parallel * sections num_threads(2) { #pragma omp section { * //extended_equidistant_gather2_parallel<TYPE>(A, n - size, b, * ceil(p/2.)); extended_equidistant_gather2_parallel<TYPE>(A, n * - size, b, ceil((p/(double)n)*(n - size))); } #pragma omp * section { //extended_equidistant_gather2_parallel<TYPE>(&A[n - * size], size, b, ceil(p/2.)); * extended_equidistant_gather2_parallel<TYPE>(&A[n - size], * size, b, ceil((p/(double)n)*size)); } } } else { #pragma omp * parallel sections num_threads(2) { #pragma omp section { * extended_equidistant_gather2<TYPE>(A, n - size, b); } #pragma * omp section { extended_equidistant_gather2<TYPE>(&A[n - size], * size, b); } } } */ shift_right_parallel < TYPE > (&A[m - r], (m - r) * b + r, r, p); } } }
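/* equidistant_gather_chunks_parallel and the *_parallel recursions above nest
 * a "parallel for" inside another parallel region when p exceeds the cycle
 * count. Most runtimes run such inner regions with a single thread unless
 * nesting is enabled, so a caller would need something like the following.
 * Sizes here are illustrative only. */
#include <omp.h>
#include <stdio.h>

int main(void) {
    omp_set_max_active_levels(2);         /* allow two active parallel levels */
    #pragma omp parallel for num_threads(4)
    for (int i = 0; i < 4; ++i) {
        #pragma omp parallel for num_threads(2)   /* inner team per outer iteration */
        for (int j = 0; j < 8; ++j) {
            printf("outer iter %d handled j=%d on inner thread %d\n",
                   i, j, omp_get_thread_num());
        }
    }
    return 0;
}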
spi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from argv and // add OpenMP API code to set number of threads here int Nthreads; Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number long int seed = Nthreads; #pragma omp parallel { int tid = omp_get_thread_num(); srand48_r(seed + tid, drandData + tid); } double starttime = omp_get_wtime(); long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; #pragma omp parallel for reduction(+:Ncircle,Ntotal) for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; //generate two random numbers (use the thread id to offset drandData) drand48_r(drandData + omp_get_thread_num(), &rand1); drand48_r(drandData + omp_get_thread_num(), &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if it's in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0 && n > 0) { double pi = 4.0*Ncircle/ (double) (n); //note: under the reduction, Ncircle here is this thread's partial tally printf("Our estimate of pi is %g \n", pi); } } double endtime = omp_get_wtime(); double time = endtime-starttime; double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g and time is %g \n", pi, time); free(drandData); return 0; }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b:get the number of threads to run with from argv and // add OpenMP API code to set number of threads here int Nthreads; Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data *)malloc(Nthreads * sizeof(struct drand48_data)); //Q2c:add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number long int seed = Nthreads; { int tid = omp_get_thread_num(); srand48_r(seed + tid, drandData + tid); } double starttime = omp_get_wtime(); long long int Ntrials = 10000000; //need running tallies long long int Ntotal = 0; long long int Ncircle = 0; for (long long int n = 0; n < Ntrials; n++) { double rand1; double rand2; //generate two random numbers(use the thread id to offset drandData) drand48_r(drandData + omp_get_thread_num(), &rand1); drand48_r(drandData + omp_get_thread_num(), &rand2); double x = -1 + 2 * rand1; //shift to[-1, 1] double y = -1 + 2 * rand2; //check if it's in the circle if (sqrt(x * x + y * y) <= 1) Ncircle++; Ntotal++; if (n % 100 == 0 && n > 0) { double pi = 4.0 * Ncircle / (double)(n); printf("Our estimate of pi is %g \n", pi); } } double endtime = omp_get_wtime(); double time = endtime - starttime; double pi = 4.0 * Ncircle / (double)(Ntotal); printf("Our final estimate of pi is %g and time is %g \n", pi, time); free(drandData); return 0; }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b:get the number of threads to run with from argv and // add OpenMP API code to set number of threads here int Nthreads; Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data *)malloc(Nthreads * sizeof(struct drand48_data)); //Q2c:add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number long int seed = Nthreads; #pragma omp parallel { int tid = omp_get_thread_num(); srand48_r(seed + tid, drandData + tid); } double starttime = omp_get_wtime(); long long int Ntrials = 10000000; //need running tallies long long int Ntotal = 0; long long int Ncircle = 0; #pragma omp parallel for reduction(+:Ncircle,Ntotal) for (long long int n = 0; n < Ntrials; n++) { double rand1; double rand2; //generate two random numbers(use the thread id to offset drandData) drand48_r(drandData + omp_get_thread_num(), &rand1); drand48_r(drandData + omp_get_thread_num(), &rand2); double x = -1 + 2 * rand1; //shift to[-1, 1] double y = -1 + 2 * rand2; //check if it's in the circle if (sqrt(x * x + y * y) <= 1) Ncircle++; Ntotal++; if (n % 100 == 0 && n > 0) { double pi = 4.0 * Ncircle / (double)(n); //note: under the reduction, Ncircle here is this thread's partial tally printf("Our estimate of pi is %g \n", pi); } } double endtime = omp_get_wtime(); double time = endtime - starttime; double pi = 4.0 * Ncircle / (double)(Ntotal); printf("Our final estimate of pi is %g and time is %g \n", pi, time); free(drandData); return 0; }
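/* The corrected pattern above in isolation: one drand48_data state per thread,
 * a distinct seed per thread, and a reduction instead of shared counters.
 * drand48_r/srand48_r are glibc extensions, and the 12345 base seed is
 * arbitrary; squaring instead of sqrt is an equivalent in-circle test. */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void) {
    const long long Ntrials = 10000000;
    int nthreads = omp_get_max_threads();
    struct drand48_data *rng = malloc(nthreads * sizeof *rng);
    long long Ncircle = 0;

    #pragma omp parallel
    {
        int t = omp_get_thread_num();
        srand48_r(12345 + t, &rng[t]);    /* distinct stream per thread */
    }
    #pragma omp parallel for reduction(+:Ncircle)
    for (long long n = 0; n < Ntrials; ++n) {
        int t = omp_get_thread_num();
        double r1, r2;
        drand48_r(&rng[t], &r1);
        drand48_r(&rng[t], &r2);
        double x = 2 * r1 - 1, y = 2 * r2 - 1;
        if (x * x + y * y <= 1.0) Ncircle++;
    }
    printf("pi is approximately %g\n", 4.0 * Ncircle / (double)Ntrials);
    free(rng);
    return 0;
}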
dependences_mutexinoutset.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // GCC 9 introduced codegen for mutexinoutset // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // clang 9 introduced codegen for mutexinoutset // UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8 #include "callback.h" #include <omp.h> #include <math.h> #include <unistd.h> int main() { int x = 0; #pragma omp parallel num_threads(2) { #pragma omp master { print_ids(0); printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value, &x); #pragma omp task depend(out : x) { x++; delay(100); } print_fuzzy_address(1); print_ids(0); #pragma omp task depend(mutexinoutset : x) { x++; delay(100); } print_fuzzy_address(2); print_ids(0); #pragma omp task depend(in : x) { x = -1; } print_ids(0); } } x++; return 0; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_inout)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_mutexinoutset)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[THIRD_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes 
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[THIRD_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_in)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]]
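The CHECK lines above are emitted by the OMPT tool that callback.h installs. As a rough standalone sketch of that tool-side plumbing, reduced to just the dependences callback, consider the following; it assumes the OpenMP 5.0 OMPT interface from omp-tools.h, and on_dependences / my_initialize are illustrative names, not part of the test suite, which registers many more callbacks.

#include <stdio.h>
#include <omp-tools.h>

/* Print each dependence the runtime reports when a task is created;
   signature matches ompt_callback_dependences_t from OpenMP 5.0. */
static void on_dependences(ompt_data_t *task_data,
                           const ompt_dependence_t *deps, int ndeps) {
  (void)task_data;
  for (int i = 0; i < ndeps; i++)
    printf("dep on %p, type=%d\n", deps[i].variable.ptr,
           (int)deps[i].dependence_type);
}

static int my_initialize(ompt_function_lookup_t lookup,
                         int initial_device_num, ompt_data_t *tool_data) {
  (void)initial_device_num;
  (void)tool_data;
  ompt_set_callback_t set_callback =
      (ompt_set_callback_t)lookup("ompt_set_callback");
  set_callback(ompt_callback_dependences, (ompt_callback_t)on_dependences);
  return 1; /* nonzero keeps the tool active */
}

static void my_finalize(ompt_data_t *tool_data) { (void)tool_data; }

/* The OpenMP runtime looks this symbol up at startup to activate the tool. */
ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  (void)omp_version;
  (void)runtime_version;
  static ompt_start_tool_result_t result = {&my_initialize, &my_finalize, {0}};
  return &result;
}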
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // GCC 9 introduced codegen for mutexinoutset // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // clang 9 introduced codegen for mutexinoutset // UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8 #include "callback.h" #include <omp.h> #include <math.h> #include <unistd.h> int main() { int x = 0; #pragma omp master { print_ids(0); printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value, &x); #pragma omp task depend(out : x) { x++; delay(100); } print_fuzzy_address(1); print_ids(0); x++; delay(100); print_fuzzy_address(2); print_ids(0); x = -1; print_ids(0); } x++; return 0; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_inout)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_mutexinoutset)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[THIRD_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[THIRD_TASK]], deps=[([[ADDRX]], // 
CHECK-SAME: ompt_dependence_type_in)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]]
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // GCC 9 introduced codegen for mutexinoutset // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // clang 9 introduced codegen for mutexinoutset // UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8 #include "callback.h" #include <omp.h> #include <math.h> #include <unistd.h> int main() { int x = 0; #pragma omp parallel num_threads(2) { #pragma omp master { print_ids(0); printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value, &x); #pragma omp task depend(out : x) { x++; delay(100); } print_fuzzy_address(1); print_ids(0); #pragma omp task depend(mutexinoutset : x) { x++; delay(100); } print_fuzzy_address(2); print_ids(0); #pragma omp task depend(in : x) { x = -1; } print_ids(0); } } x++; return 0; } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_inout)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]], // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes // CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_mutexinoutset)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]], // CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}}, // CHECK-SAME: new_task_id=[[THIRD_TASK:[0-f]+]], codeptr_ra={{0x[0-f]+}}, // CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes 
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences: // CHECK-SAME: task_id=[[THIRD_TASK]], deps=[([[ADDRX]], // CHECK-SAME: ompt_dependence_type_in)], ndeps=1 // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], // CHECK-SAME: reenter_frame=[[NULL]]
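All three variants above revolve around the ordering guarantees of the three dependence types being traced. A minimal sketch of those semantics, separate from the OMPT machinery (it needs a compiler with OpenMP 5.0 task-dependence codegen, i.e. gcc 9+ or clang 9+, as the UNSUPPORTED lines note): the out task runs first, the two mutexinoutset tasks run in either order but never concurrently, and the in task runs last, so it always prints 111.

#include <omp.h>
#include <stdio.h>

int main(void) {
  int x = 0;
#pragma omp parallel num_threads(4)
#pragma omp single
  {
#pragma omp task depend(out : x) /* producer: ordered before the others */
    x = 1;
#pragma omp task depend(mutexinoutset : x) /* mutually exclusive updates */
    x += 10;
#pragma omp task depend(mutexinoutset : x) /* may run before or after x += 10 */
    x += 100;
#pragma omp task depend(in : x) /* consumer: always observes x == 111 */
    printf("x = %d\n", x);
  }
  return 0;
}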
GB_unop__expm1_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__expm1_fc64_fc64) // op(A') function: GB (_unop_tran__expm1_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_cexpm1 (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cexpm1 (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_cexpm1 (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXPM1 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__expm1_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__expm1_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__expm1_fc64_fc64) // op(A') function: GB (_unop_tran__expm1_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_cexpm1 (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cexpm1 (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_cexpm1 (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXPM1 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__expm1_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__expm1_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__expm1_fc64_fc64) // op(A') function: GB (_unop_tran__expm1_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_cexpm1 (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cexpm1 (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_cexpm1 (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EXPM1 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__expm1_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cexpm1 (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__expm1_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
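Aside from the transpose path, the generated kernel above is a thin wrapper around one loop: read aij, cast it (a no-op for fc64 to fc64), apply the operator, and write Cx[p], with a bitmap variant that skips entries whose Ab[p] is zero. A self-contained sketch of the dense path follows; cexpm1_sketch is a hypothetical stand-in for the internal GB_cexpm1, and its naive cexp(z) - 1 loses accuracy for |z| near zero, which is exactly what a real expm1 implementation avoids.

#include <complex.h>
#include <stdio.h>

/* hypothetical stand-in for GB_cexpm1; naive and inaccurate near z = 0 */
static double complex cexpm1_sketch (double complex z)
{
    return (cexp (z) - 1.0) ;
}

int main (void)
{
    enum { ANZ = 4 } ;
    double complex Ax [ANZ] = { 0.0, 1.0, I, 1.0 + I } ;
    double complex Cx [ANZ] ;
    int p ;
    /* dense case: same shape as the kernel's non-bitmap loop */
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < ANZ ; p++)
    {
        double complex aij = Ax [p] ;    /* GB_GETA */
        double complex z = aij ;         /* cast (no-op for fc64 -> fc64) */
        Cx [p] = cexpm1_sketch (z) ;     /* GB_OP */
    }
    for (p = 0 ; p < ANZ ; p++)
    {
        printf ("(%g, %g)\n", creal (Cx [p]), cimag (Cx [p])) ;
    }
    return (0) ;
}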
linear.c
#include "linear.h" #include <assert.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <lauxlib.h> #include <cblas.h> #include <lapacke.h> /* matrix orders */ static const char * const ORDERS[] = { "row", "col", NULL }; /* checks an order */ static CBLAS_ORDER checkorder (lua_State *L, int index) { switch (luaL_checkoption(L, index, "row", ORDERS)) { case 0: return CblasRowMajor; case 1: return CblasColMajor; } /* not reached */ assert(0); return (CBLAS_ORDER)0; } /* checks a transpose */ static CBLAS_TRANSPOSE checktranspose (lua_State *L, int index) { static const char * const TRANSPOSES[] = { "notrans", "trans", NULL }; switch (luaL_checkoption(L, index, "notrans", TRANSPOSES)) { case 0: return CblasNoTrans; case 1: return CblasTrans; } /* not reached */ assert(0); return (CBLAS_TRANSPOSE)0; } /* translates a transpose for LAPACK */ static char lapacktranspose (CBLAS_TRANSPOSE transpose) { switch (transpose) { case CblasNoTrans: return 'N'; case CblasTrans: return 'T'; default: /* not reached */ assert(0); return '\0'; } } /* returns an int value from a table */ static int intvalue (lua_State *L, const char *key, int dfl) { int result, isinteger; lua_getfield(L, -1, key); if (!lua_isnil(L, -1)) { result = lua_tointegerx(L, -1, &isinteger); if (!isinteger) { luaL_error(L, "bad field " LUA_QS, key); } } else { if (dfl < 0) { luaL_error(L, "missing field " LUA_QS, key); } result = dfl; } lua_pop(L, 1); return result; } /* returns an option value from a table */ static int optionvalue (lua_State *L, const char *key, const char *dfl, const char *options[]) { const char *str; int i; lua_getfield(L, -1, key); if (!lua_isnil(L, -1)) { str = lua_tostring(L, -1); if (str == NULL) { luaL_error(L, "bad field " LUA_QS, key); } } else { if (dfl == NULL) { luaL_error(L, "missing field " LUA_QS, key); } str = dfl; } lua_pop(L, 1); for (i = 0; options[i] != NULL; i++) { if (strcmp(options[i], str) == 0) { return i; } } luaL_error(L, "bad option " LUA_QS " in field " LUA_QS, str, key); return 0; /* not reached */ } /* raises a linear argument error */ static int argerror (lua_State *L, int index) { return luaL_argerror(L, index, lua_pushfstring(L, "vector, or matrix " "expected, got %s", luaL_typename(L, index))); } /* pushes a new vector onto the stack */ static struct vector *newvector (lua_State *L, int size) { return lualinear_newvector(L, size); } /* pushes an existing vector onto the stack */ static struct vector *wrapvector (lua_State *L, int size, float *values) { return lualinear_wrapvector(L, size, values); } /* creates a new vector */ static int vector (lua_State *L) { int size; /* process arguments */ size = luaL_checkinteger(L, 1); luaL_argcheck(L, size >= 1, 1, "bad dimension"); /* create */ newvector(L, size); return 1; } /* vector length implementation */ static int vector_len (lua_State *L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushinteger(L, x->size); return 1; } /* vector index implementation */ static int vector_index (lua_State *L) { struct vector *x; int index; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index"); lua_pushnumber(L, x->values[(size_t)(index - 1) * x->inc]); return 1; } /* matrix vector newindex implementation */ static int vector_newindex (lua_State *L) { struct vector *x; int index; float value; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); 
luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index"); value = luaL_checknumber(L, 3); x->values[(size_t)(index - 1) * x->inc] = value; return 0; } /* vector next function */ static int vector_next (lua_State *L) { struct vector *x; int index; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); if (index >= 0 && index < x->size) { lua_pushinteger(L, index + 1); lua_pushnumber(L, x->values[(size_t)index]); return 2; } lua_pushnil(L); return 1; } /* vector ipairs function */ static int vector_ipairs (lua_State *L) { luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_next); lua_pushvalue(L, 1); lua_pushinteger(L, 0); return 3; } /* returns the string representation of a vector */ static int vector_tostring (lua_State *L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushfstring(L, "vector: %p", x); return 1; } /* frees a vector */ static int vector_free (lua_State *L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x->ref == LUA_NOREF) { free(x->values); } else { luaL_unref(L, LUA_REGISTRYINDEX, x->ref); } return 0; } /* pushes a new matrix onto the stack */ static struct matrix *newmatrix (lua_State *L, int rows, int cols, CBLAS_ORDER order) { return lualinear_newmatrix(L, rows, cols, order); } /* pushes an existing matrix onto the stack */ static struct matrix *wrapmatrix (lua_State *L, int rows, int cols, CBLAS_ORDER order, float *values) { return lualinear_wrapmatrix(L, rows, cols, order, values); } /* creates a new matrix */ static int matrix (lua_State *L) { int rows, cols; CBLAS_ORDER order; /* process arguments */ rows = luaL_checkinteger(L, 1); luaL_argcheck(L, rows >= 1, 1, "bad dimension"); cols = luaL_checkinteger(L, 2); luaL_argcheck(L, cols >= 1, 2, "bad dimension"); order = checkorder(L, 3); /* create */ newmatrix(L, rows, cols, order); return 1; } /* returns the length of a matrix */ static int matrix_len (lua_State *L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); switch (X->order) { case CblasRowMajor: lua_pushinteger(L, X->rows); break; case CblasColMajor: lua_pushinteger(L, X->cols); break; } return 1; } /* matrix index implementation */ static int matrix_index (lua_State *L) { struct matrix *X; int index, size; struct vector *x; /* process arguments */ X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1, 2, "bad index"); switch (X->order) { case CblasRowMajor: luaL_argcheck(L, index <= X->rows, 2, "bad index"); size = X->cols; break; case CblasColMajor: luaL_argcheck(L, index <= X->cols, 2, "bad index"); size = X->rows; break; default: /* not reached */ size = -1; assert(0); } /* create vector */ x = wrapvector(L, size, &X->values[(size_t)(index - 1) * X->ld]); lua_pushvalue(L, 1); x->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } /* matrix next function */ static int matrix_next (lua_State *L) { struct matrix *X; int index, majorsize, minorsize; struct vector *x; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); switch (X->order) { case CblasRowMajor: majorsize = X->rows; minorsize = X->cols; break; case CblasColMajor: majorsize = X->cols; minorsize = X->rows; break; default: /* not reached */ assert(0); return 0; } if (index >= 0 && index < majorsize) { lua_pushinteger(L, index + 1); x = wrapvector(L, minorsize, &X->values[(size_t)index * X->ld]); lua_pushvalue(L, 1); x->ref = luaL_ref(L, 
LUA_REGISTRYINDEX); return 2; } lua_pushnil(L); return 1; } /* matrix ipairs function */ static int matrix_ipairs (lua_State *L) { luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_next); lua_pushvalue(L, 1); lua_pushinteger(L, 0); return 3; } /* returns the string representation of a matrix */ static int matrix_tostring (lua_State *L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); lua_pushfstring(L, "matrix: %p", X); return 1; } /* frees a matrix */ static int matrix_free (lua_State *L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X->ref == LUA_NOREF) { free(X->values); } else { luaL_unref(L, LUA_REGISTRYINDEX, X->ref); } return 0; } /* returns the type of a linear object */ static int type (lua_State *L) { if (luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE) != NULL) { lua_pushliteral(L, "vector"); return 1; } if (luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE) != NULL) { lua_pushliteral(L, "matrix"); return 1; } lua_pushnil(L); return 1; } /* returns the size of a linear object */ static int size (lua_State *L) { struct vector *x; struct matrix *X; x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_pushinteger(L, x->size); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { lua_pushinteger(L, X->rows); lua_pushinteger(L, X->cols); lua_pushstring(L, ORDERS[X->order == CblasRowMajor ? 0 : 1]); return 3; } return argerror(L, 1); } /* transposed vector */ static int tvector (lua_State *L) { struct matrix *X; int index, size; struct vector *x; /* process arguments */ X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1, 2, "bad index"); switch (X->order) { case CblasRowMajor: luaL_argcheck(L, index <= X->cols, 2, "bad index"); size = X->rows; break; case CblasColMajor: luaL_argcheck(L, index <= X->rows, 2, "bad index"); size = X->cols; break; default: /* not reached */ size = -1; assert(0); } /* create vector */ x = wrapvector(L, size, &X->values[index - 1]); x->inc = X->ld; lua_pushvalue(L, 1); x->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } /* subvector or submatrix */ static int sub (lua_State *L) { struct vector *x, *s; struct matrix *X, *S; /* process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { int start, end; start = luaL_optinteger(L, 2, 1); luaL_argcheck(L, start >= 1 && start <= x->size, 2, "bad index"); end = luaL_optinteger(L, 3, x->size); luaL_argcheck(L, end >= start && end <= x->size, 3, "bad index"); s = wrapvector(L, end - start + 1, &x->values[ (size_t)(start - 1) * x->inc]); s->inc = x->inc; lua_pushvalue(L, 1); s->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { int rowstart, rowend, colstart, colend; switch (X->order){ case CblasRowMajor: rowstart = luaL_optinteger(L, 2, 1); luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows, 2, "bad index"); colstart = luaL_optinteger(L, 3, 1); luaL_argcheck(L, colstart >= 1 && colstart <= X->cols, 3, "bad index"); rowend = luaL_optinteger(L, 4, X->rows); luaL_argcheck(L, rowend >= rowstart && rowend <= X->rows, 4, "bad index"); colend = luaL_optinteger(L, 5, X->cols); luaL_argcheck(L, colend >= colstart && colend <= X->cols, 5, "bad index"); S = wrapmatrix(L, rowend - rowstart + 1, colend - colstart + 1, X->order, &X->values[ (size_t)(rowstart - 1) * X->ld + colstart - 1]); break; case CblasColMajor: colstart = 
luaL_optinteger(L, 2, 1); luaL_argcheck(L, colstart >= 1 && colstart <= X->cols, 2, "bad index"); rowstart = luaL_optinteger(L, 3, 1); luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows, 3, "bad index"); colend = luaL_optinteger(L, 4, X->cols); luaL_argcheck(L, colend >= colstart && colend <= X->cols, 4, "bad index"); rowend = luaL_optinteger(L, 5, X->rows); luaL_argcheck(L, rowend >= rowstart && rowend <= X->rows, 5, "bad index"); S = wrapmatrix(L, rowend - rowstart + 1, colend - colstart + 1, X->order, &X->values[ (size_t)(colstart - 1) * X->ld + rowstart - 1]); break; default: /* not reached */ assert(0); return 0; } S->ld = X->ld; lua_pushvalue(L, 1); S->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } return argerror(L, 1); } /* unwinds matrices into a vector */ static int unwind (lua_State *L) { struct vector *x; int index, i, j, k; size_t base; struct matrix *X; if (lua_gettop(L) == 0) { return luaL_error(L, "wrong number of arguments"); } x = luaL_checkudata(L, lua_gettop(L), LUALINEAR_VECTOR_METATABLE); index = 1; i = 0; while (i < x->size) { X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->rows * X->cols <= x->size - i, index, "matrix too large"); switch (X->order) { case CblasRowMajor: for (j = 0; j < X->rows; j++) { base = (size_t)j * X->ld; for (k = 0; k < X->cols; k++) { x->values[(size_t)i * x->inc] = X->values[base + k]; i++; } } break; case CblasColMajor: for (j = 0; j < X->cols; j++) { base = (size_t)j * X->ld; for (k = 0; k < X->rows; k++) { x->values[(size_t)i * x->inc] = X->values[base + k]; i++; } } break; } index++; } return 0; } /* reshapes a vector into matrices */ static int reshape (lua_State *L) { struct vector *x; int index, i, j, k; size_t base; struct matrix *X; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = 2; i = 0; while (i < x->size) { X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->rows * X->cols <= x->size - i, index, "matrix too large"); switch (X->order) { case CblasRowMajor: for (j = 0; j < X->rows; j++) { base = (size_t)j * X->ld; for (k = 0; k < X->cols; k++) { X->values[base + k] = x->values[ (size_t)i * x->inc]; i++; } } break; case CblasColMajor: for (j = 0; j < X->cols; j++) { base = (size_t)j * X->ld; for (k = 0; k < X->rows; k++) { X->values[base + k] = x->values[ (size_t)i * x->inc]; i++; } } break; } index++; } return 0; } /* converts a vector or matrix to a table */ static int totable (lua_State *L) { struct vector *x; struct matrix *X; int i, j; const float *value; /* check and process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_createtable(L, 0, 3); lua_pushliteral(L, "vector"); lua_setfield(L, -2, "type"); lua_pushinteger(L, x->size); lua_setfield(L, -2, "length"); lua_createtable(L, x->size, 0); value = x->values; for (i = 0; i < x->size; i++) { lua_pushnumber(L, *value); lua_rawseti(L, -2, i + 1); value += x->inc; } lua_setfield(L, -2, "values"); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { lua_createtable(L, 0, 5); lua_pushliteral(L, "matrix"); lua_setfield(L, -2, "type"); lua_pushinteger(L, X->rows); lua_setfield(L, -2, "rows"); lua_pushinteger(L, X->cols); lua_setfield(L, -2, "cols"); switch (X->order) { case CblasRowMajor: lua_pushliteral(L, "rowmajor"); lua_setfield(L, -2, "order"); lua_createtable(L, X->rows, 0); for (i = 0; i < X->rows; i++) { lua_createtable(L, X->cols, 0); value = &X->values[(size_t)i * X->ld]; for (j = 0; j < X->cols; j++) { lua_pushnumber(L, 
*value++); lua_rawseti(L, -2, j + 1); } lua_rawseti(L, -2, i + 1); } lua_setfield(L, -2, "values"); break; case CblasColMajor: lua_pushliteral(L, "colmajor"); lua_setfield(L, -2, "order"); lua_createtable(L, X->cols, 0); for (i = 0; i < X->cols; i++) { lua_createtable(L, X->rows, 0); value = &X->values[(size_t)i * X->ld]; for (j = 0; j < X->rows; j++) { lua_pushnumber(L, *value++); lua_rawseti(L, -2, j + 1); } lua_rawseti(L, -2, i + 1); } lua_setfield(L, -2, "values"); break; } return 1; } return argerror(L, 1); } /* converts a table to a vector or matrix */ static int tolinear (lua_State *L) { static const char *types[] = { "vector", "matrix", NULL }; static const char *orders[] = { "rowmajor", "colmajor", NULL }; struct vector *x; struct matrix *X; int size, rows, cols, major, minor; CBLAS_ORDER order; int i, j; int isnum; float *value; /* check arguments */ luaL_checktype(L, 1, LUA_TTABLE); lua_settop(L, 1); /* handle types */ switch (optionvalue(L, "type", NULL, types)) { case 0: /* vector */ size = intvalue(L, "length", -1); if (size < 1) { return luaL_error(L, "bad field " LUA_QS, "length"); } x = newvector(L, size); lua_getfield(L, 1, "values"); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad field " LUA_QS, "values"); } value = x->values; for (i = 0; i < size; i++) { lua_rawgeti(L, -1, i + 1); *value++ = lua_tonumberx(L, -1, &isnum); if (!isnum) { return luaL_error(L, "bad value at index %d", i + 1); } lua_pop(L, 1); } lua_pop(L, 1); return 1; case 1: /* matrix */ rows = intvalue(L, "rows", -1); if (rows < 1) { return luaL_error(L, "bad field " LUA_QS, "rows"); } cols = intvalue(L, "cols", -1); if (cols < 1) { return luaL_error(L, "bad field " LUA_QS, "cols"); } switch (optionvalue(L, "order", NULL, orders)) { case 0: order = CblasRowMajor; major = rows; minor = cols; break; case 1: order = CblasColMajor; major = cols; minor = rows; break; default: /* not reached */ assert(0); return 0; } X = newmatrix(L, rows, cols, order); lua_getfield(L, 1, "values"); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad field " LUA_QS, "values"); } for (i = 0; i < major; i++) { value = &X->values[i * X->ld]; lua_rawgeti(L, -1, i + 1); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad value at index %d", i + 1); } for (j = 0; j < minor; j++) { lua_rawgeti(L, -1, j + 1); *value++ = lua_tonumberx(L, -1, &isnum); if (!isnum) { return luaL_error(L, "bad value at " "index (%d,%d)", i + 1, j + 1); } lua_pop(L, 1); } lua_pop(L, 1); } lua_pop(L, 1); return 1; } /* not reached */ assert(0); return 0; } /* invokes the DOT subprogram (x' y) */ static int dot (lua_State *L) { struct vector *x, *y; float dot; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch"); /* invoke subprogram */ dot = cblas_sdot(x->size, x->values, x->inc, y->values, y->inc); lua_pushnumber(L, dot); return 1; } /* invokes the NRM2 subprogram (||x||_2) */ static int nrm2 (lua_State *L) { struct vector *x; float nrm2; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ nrm2 = cblas_snrm2(x->size, x->values, x->inc); lua_pushnumber(L, nrm2); return 1; } /* invokes the ASUM subprogram (sigma |x|) */ static int asum (lua_State *L) { struct vector *x; float asum; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ asum =
cblas_sasum(x->size, x->values, x->inc); lua_pushnumber(L, asum); return 1; } /* invokes the IAMAX subprogram (argmax |x|) */ static int iamax (lua_State *L) { struct vector *x; int iamax; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ iamax = cblas_isamax(x->size, x->values, x->inc); lua_pushinteger(L, iamax + 1); return 1; } /* sum implementation */ static float _sum (const float *values, int size, int inc) { float sum; int i; sum = 0.0; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) reduction(+:sum) for (i = 0; i < size; i++) { sum += values[(size_t)i * inc]; } return sum; } /* sum implementation (sigma x_i) */ static int sum (lua_State *L) { struct vector *x, *y; struct matrix *X; int i; /* check and process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_pushnumber(L, _sum(x->values, x->size, x->inc)); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); switch (checktranspose(L, 3)) { case CblasNoTrans: switch (X->order) { case CblasRowMajor: luaL_argcheck(L, y->size == X->rows, 2, "dimension mismatch"); for (i = 0; i < X->rows; i++) { y->values[(size_t)i * y->inc] = _sum( &X->values[(size_t)i * X->ld], X->cols, 1); } break; case CblasColMajor: luaL_argcheck(L, y->size == X->cols, 2, "dimension mismatch"); for (i = 0; i < X->cols; i++) { y->values[(size_t)i * y->inc] = _sum( &X->values[(size_t)i * X->ld], X->rows, 1); } break; } break; case CblasTrans: switch (X->order) { case CblasRowMajor: luaL_argcheck(L, y->size == X->cols, 2, "dimension mismatch"); for (i = 0; i < X->cols; i++) { y->values[(size_t)i * y->inc] = _sum( &X->values[(size_t)i], X->rows, X->ld); } break; case CblasColMajor: luaL_argcheck(L, y->size == X->rows, 2, "dimension mismatch"); for (i = 0; i < X->rows; i++) { y->values[(size_t)i * y->inc] = _sum( &X->values[(size_t)i], X->cols, X->ld); } break; } break; default: /* not reached */ assert(0); break; } return 0; } return argerror(L, 1); } /* xy function */ typedef void(*xyfunction)(int, float *, int, float *, int, float); /* invokes an (x,y) subprogram */ static int xy (lua_State *L, xyfunction s, int hasy, int hasalpha) { int index, i; float alpha; struct vector *x, *y; struct matrix *X, *Y; /* check and process arguments */ index = 2; x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { if (hasy) { y = luaL_testudata(L, 2, LUALINEAR_VECTOR_METATABLE); Y = luaL_testudata(L, 2, LUALINEAR_MATRIX_METATABLE); if (y == NULL && Y == NULL) { return argerror(L, 2); } index++; } else { y = x; Y = NULL; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } if (y != NULL) { /* invoke subprogram on vector-vector */ luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch"); s(x->size, x->values, x->inc, y->values, y->inc, alpha); return 0; } /* invoke subprogram on vector-matrix */ switch (checktranspose(L, index)) { case CblasNoTrans: switch (Y->order) { case CblasRowMajor: luaL_argcheck(L, x->size == Y->cols, 1, "dimension mismatch"); for (i = 0; i < Y->rows; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t)i * Y->ld], 1, alpha); } break; case CblasColMajor: luaL_argcheck(L, x->size == Y->rows, 1, "dimension mismatch"); for (i = 0; i < Y->cols; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t)i * Y->ld], 1, alpha); } break; } break; case CblasTrans: switch (Y->order) { case
CblasRowMajor: luaL_argcheck(L, x->size == Y->rows, 1, "dimension mismatch"); for (i = 0; i < Y->rows; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t)i], Y->ld, alpha); } break; case CblasColMajor: luaL_argcheck(L, x->size == Y->cols, 1, "dimension mismatch"); for (i = 0; i < Y->cols; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t)i], Y->ld, alpha); } break; } break; default: /* not reached */ assert(0); } return 0; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { if (hasy) { Y = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->order == Y->order, 2, "order mismatch"); luaL_argcheck(L, X->rows == Y->rows && X->cols == Y->cols, 2, "dimension mismatch"); index++; } else { Y = X; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } /* invoke subprogram on matrix-matrix */ switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { s(X->cols, &X->values[(size_t)i * X->ld], 1, &Y->values[(size_t)i * Y->ld], 1, alpha); } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { s(X->rows, &X->values[(size_t)i * X->ld], 1, &Y->values[(size_t)i * Y->ld], 1, alpha); } break; } return 0; } return argerror(L, 1); } /* wraps the SWAP subprogram */ static void _swap (int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_sswap(size, x, incx, y, incy); } /* invokes the SWAP subprogram (y <-> x) */ static int swap (lua_State *L) { return xy(L, _swap, 1, 0); } /* wraps the COPY subprogram */ static void _copy (int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_scopy(size, x, incx, y, incy); } /* invokes the COPY subprogram (y <- x) */ static int copy (lua_State *L) { return xy(L, _copy, 1, 0); } /* wraps the AXPY subprogram */ static void _axpy (int size, float *x, int incx, float *y, int incy, float alpha) { cblas_saxpy(size, alpha, x, incx, y, incy); } /* invokes the AXPY subprogram (y <- alpha x + y) */ static int axpy (lua_State *L) { return xy(L, _axpy, 1, 1); } /* wraps the SCAL subprogram */ static void _scal (int size, float *x, int incx, float *y, int incy, float alpha) { (void)y; (void)incy; cblas_sscal(size, alpha, x, incx); } /* invokes the SCAL subprogram (x <- alpha x) */ static int scal (lua_State *L) { return xy(L, _scal, 0, 1); } /* set operation implementation */ static void _set (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t)i * incx] = alpha; } } /* performs a set operation (x <- alpha) */ static int set (lua_State *L) { return xy(L, _set, 0, 1); } /* uniform RNG implementation */ static void _uniform (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; (void)alpha; for (i = 0; i < size; i++) { *x = (float)random() * (1.0 / ((float)RAND_MAX + 1.0)); x += incx; } } /* performs a uniform operation (x <- uniform) */ static int uniform (lua_State *L) { return xy(L, _uniform, 0, 0); } /* normal RNG implementation */ static void _normal (int size, float *x, int incx, float *y, int incy, float alpha) { int i; float u1, u2, r, s, c; (void)y; (void)incy; (void)alpha; for (i = 0; i < size - 1; i += 2) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= -DBL_MAX); r = sqrt(-2.0 * logf(u1)); sincosf(2 * M_PI * u2, &s, &c); *x = r * c; x += incx; *x =
r * s; x += incx; } if (i < size) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= -DBL_MAX); *x = sqrtf(-2.0 * logf(u1)) * cosf(2 * M_PI * u2); x += incx; } } /* performs a normal operation (x <- normal) */ static int normal (lua_State *L) { return xy(L, _normal, 0, 0); } /* inc operation implementation */ static void _inc (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t)i * incx] += alpha; } } /* performs an inc operation (x <- x + alpha) */ static int inc (lua_State *L) { return xy(L, _inc, 0, 1); } /* element-wise multiplication implementation, alpha = 1 */ static void _mul1 (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t)i * incy] *= x[(size_t)i * incx]; } } /* element-wise multiplication implementation, alpha = -1 */ static void _mulm1 (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t)i * incy] /= x[(size_t)i * incx]; } } /* element-wise multiplication implementation, alpha = any */ static void _mul (int size, float *x, int incx, float *y, int incy, float alpha) { int i; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t)i * incy] *= pow(x[(size_t)i * incx], alpha); } } /* performs element-wise multiplication (y <- x^alpha .* y) */ static int mul (lua_State *L) { float alpha; alpha = luaL_optnumber(L, 3, 1.0); if (alpha == 1.0) { return xy(L, _mul1, 1, 1); } if (alpha == -1.0) { return xy(L, _mulm1, 1, 1); } return xy(L, _mul, 1, 1); } /* power raising operation implementation */ static void _pow (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t)i * incx] = pow(x[(size_t)i * incx], alpha); } } /* performs element-wise power raising (x <- x^alpha) */ static int powx (lua_State *L) { return xy(L, _pow, 0, 1); } /* apply function */ typedef float(*applyfunction)(float); /* applies a function to a value */ static int apply (lua_State *L, applyfunction apply, int parallel) { struct vector *x; struct matrix *X; int i, j; size_t base; if (lua_type(L, 1) == LUA_TNUMBER) { lua_pushnumber(L, apply(lua_tonumber(L, 1))); return 1; } x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { #pragma omp parallel for private(i) schedule(auto) \ if(parallel && x->size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < x->size; i++) { x->values[(size_t)i * x->inc] = apply(x->values[(size_t)i * x->inc]); } return 0; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { base = (size_t)i * X->ld; #pragma omp parallel for private(j) \ schedule(auto) \ if(parallel && X->cols \ >= LUALINEAR_OMP_MINSIZE) for (j = 0; j < X->cols; j++) { X->values[base + j] = apply( X->values[base + j]); } } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { base = (size_t)i * X->ld; #pragma omp parallel for private(j) \
schedule(auto) \ if(parallel && X->rows \ >= LUALINEAR_OMP_MINSIZE) for (j = 0; j < X->rows; j++) { X->values[base + j] = apply( X->values[base + j]); } } break; } return 0; } return luaL_argerror(L, 1, lua_pushfstring(L, "number, vector, or " "matrix expected, got %s", luaL_typename(L, 1))); } /* sign function implementation */ static float _sign (float x) { if (x > 0) { return 1; } if (x < 0) { return -1; } return x; } /* sign function */ static int sign (lua_State *L) { return apply(L, _sign, 1); } /* abs function implementation */ static float _abs (float x) { return fabs(x); } /* abs function */ static int absx (lua_State *L) { return apply(L, _abs, 1); } /* exp function */ static int expx (lua_State *L) { return apply(L, expf, 1); } /* log function */ static int logx (lua_State *L) { return apply(L, logf, 1); } /* logistic function implementation */ static float _logistic (float z) { return 1.0 / (1.0 + expf(-z)); } /* logistic function */ static int logistic (lua_State *L) { return apply(L, _logistic, 1); } /* tanh function */ static int tanhx (lua_State *L) { return apply(L, tanhf, 1); } /* softplus function implementation */ static float _softplus (float x) { return logf(1 + expf(x)); } /* softplus function */ static int softplus (lua_State *L) { return apply(L, _softplus, 1); } /* rectifier function implementation */ static float _rectifier (float x) { return x > 0.0 ? x : 0.0; } /* rectifier function */ static int rectifier (lua_State *L) { return apply(L, _rectifier, 1); } /* current Lua state */ static __thread lua_State *TL; /* apply function implementation */ static float _apply (float x) { float result; lua_pushvalue(TL, -1); lua_pushnumber(TL, x); lua_call(TL, 1, 1); result = lua_tonumber(TL, -1); lua_pop(TL, 1); return result; } /* apply function */ static int applyx (lua_State *L) { luaL_checktype(L, 2, LUA_TFUNCTION); lua_settop(L, 2); TL = L; return apply(L, _apply, 0); } /* invokes the GEMV subprogram (y <- alpha A x + b y) */ static int gemv (lua_State *L) { struct matrix *A; struct vector *x, *y; float alpha, beta; CBLAS_TRANSPOSE ta; int m, n; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); x = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 3, LUALINEAR_VECTOR_METATABLE); alpha = luaL_optnumber(L, 4, 1.0); beta = luaL_optnumber(L, 5, 0.0); ta = checktranspose(L, 6); m = ta == CblasNoTrans ? A->rows : A->cols; n = ta == CblasNoTrans ? 
A->cols : A->rows; luaL_argcheck(L, x->size == n, 2, "dimension mismatch"); luaL_argcheck(L, y->size == m, 3, "dimension mismatch"); /* invoke subprogram */ cblas_sgemv(A->order, ta, A->rows, A->cols, alpha, A->values, A->ld, x->values, x->inc, beta, y->values, y->inc); return 0; } /* invokes the GER subprogram (A <- alpha x y' + A) */ static int ger (lua_State *L) { struct vector *x, *y; struct matrix *A; float alpha; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); A = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE); alpha = luaL_optnumber(L, 4, 1.0); luaL_argcheck(L, x->size == A->rows, 1, "dimension mismatch"); luaL_argcheck(L, y->size == A->cols, 2, "dimension mismatch"); /* invoke subprogram */ cblas_sger(A->order, A->rows, A->cols, alpha, x->values, x->inc, y->values, y->inc, A->values, A->ld); return 0; } /* invokes the GEMM subprogram (C <- alpha A B + beta C) */ static int gemm (lua_State *L) { struct matrix *A, *B, *C; float alpha, beta; CBLAS_TRANSPOSE ta, tb; int m, n, ka, kb; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); C = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, C->order == A->order, 3, "order mismatch"); alpha = luaL_optnumber(L, 4, 1.0); beta = luaL_optnumber(L, 5, 0.0); ta = checktranspose(L, 6); tb = checktranspose(L, 7); m = ta == CblasNoTrans ? A->rows : A->cols; n = tb == CblasNoTrans ? B->cols : B->rows; ka = ta == CblasNoTrans ? A->cols : A->rows; kb = tb == CblasNoTrans ? B->rows : B->cols; luaL_argcheck(L, ka == kb, 2, "dimension mismatch"); /* invoke subprogram */ cblas_sgemm(A->order, ta, tb, m, n, ka, alpha, A->values, A->ld, B->values, B->ld, beta, C->values, C->ld); return 0; } /* invokes the GESV subprogram */ static int gesv (lua_State *L) { struct matrix *A, *B; int *ipiv, result; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); luaL_argcheck(L, B->rows == A->rows, 2, "dimension mismatch"); /* invoke subprogram */ ipiv = calloc(A->rows, sizeof(lapack_int)); if (ipiv == NULL) { return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgesv(A->order, A->rows, B->cols, A->values, A->ld, ipiv, B->values, B->ld); free(ipiv); lua_pushinteger(L, result); return 1; } /* invokes the GELS subprogram */ static int gels (lua_State *L) { struct matrix *A, *B; char ta; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); ta = lapacktranspose(checktranspose(L, 3)); luaL_argcheck(L, B->rows == (A->rows >= A->cols ?
A->rows : A->cols), 2, "dimension mismatch"); /* invoke subprogram */ lua_pushinteger(L, LAPACKE_sgels(A->order, ta, A->rows, A->cols, B->cols, A->values, A->ld, B->values, B->ld)); return 1; } /* calculates the inverse of a matrix */ static int inv (lua_State *L) { struct matrix *A; int *ipiv, result; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); /* invoke subprograms */ ipiv = calloc(A->rows, sizeof(lapack_int)); if (ipiv == NULL) { return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgetrf(A->order, A->rows, A->cols, A->values, A->ld, ipiv); if (result != 0) { free(ipiv); lua_pushinteger(L, result); return 1; } result = LAPACKE_sgetri(A->order, A->rows, A->values, A->ld, ipiv); free(ipiv); lua_pushinteger(L, result); return 1; } /* calculates the determinant of a matrix */ static int det (lua_State *L) { struct matrix *A; float *copy, *d, *s, det; int n, *ipiv, result, neg, i; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); n = A->rows; /* copy matrix */ copy = calloc((size_t)n * n, sizeof(float)); if (copy == NULL) { return luaL_error(L, "cannot allocate values"); } d = copy; s = A->values; for (i = 0; i < n; i++) { memcpy(d, s, (size_t)n * sizeof(float)); d += n; s += A->ld; } /* invoke subprograms */ ipiv = calloc(n, sizeof(lapack_int)); if (ipiv == NULL) { free(copy); return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgetrf(A->order, n, n, copy, n, ipiv); if (result != 0) { free(copy); free(ipiv); lua_pushnumber(L, 0.0); return 1; } /* calculate determinant */ det = 1.0; neg = 0; for (i = 0; i < n; i++) { det *= copy[(size_t)i * n + i]; if (ipiv[i] != i + 1) { neg = !neg; } } free(copy); free(ipiv); lua_pushnumber(L, neg ?
-det : det); return 1; } /* calculates the covariance of a matrix */ static int cov (lua_State *L) { struct matrix *A, *B; int ddof, i, j, k; float *means, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); ddof = luaL_optinteger(L, 3, 0); luaL_argcheck(L, ddof >= 0 && ddof < A->rows, 3, "bad ddof"); /* calculate means */ means = calloc((size_t)A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; } break; case CblasColMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t)i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; } break; } /* calculate covariance */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (A->rows - ddof); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t)i * A->ld]; vj = &A->values[(size_t)j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (A->rows - ddof); } } break; } free(means); return 0; } /* calculates the correlation of a matrix */ static int corr (lua_State *L) { struct matrix *A, *B; int i, j, k; float *means, *stds, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); /* calculate means and stds */ means = calloc((size_t)A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } stds = calloc((size_t)A->cols, sizeof(float)); if (stds == NULL) { free(means); return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v += A->ld; } stds[i] = sqrt(sum); } break; case CblasColMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= 
LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t)i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[(size_t)i * A->ld]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v++; } stds[i] = sqrt(sum); } break; } /* calculate correlation */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t)i * A->ld]; vj = &A->values[(size_t)j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; } free(means); free(stds); return 0; } /* * Exported functions. */ int luaopen_linear (lua_State *L) { static const luaL_Reg FUNCTIONS[] = { { "vector", vector }, { "matrix", matrix }, { "type", type }, { "size", size }, { "tvector", tvector }, { "sub", sub }, { "unwind", unwind }, { "reshape", reshape }, { "totable", totable }, { "tolinear", tolinear }, { "dot", dot }, { "nrm2", nrm2 }, { "asum", asum }, { "iamax", iamax }, { "sum", sum }, { "swap", swap }, { "copy", copy }, { "axpy", axpy }, { "scal", scal }, { "set", set }, { "uniform", uniform }, { "normal", normal }, { "inc", inc }, { "mul", mul }, { "pow", powx }, { "sign", sign }, { "abs", absx }, { "exp", expx }, { "log", logx }, { "logistic", logistic }, { "tanh", tanhx }, { "softplus", softplus }, { "rectifier", rectifier }, { "apply", applyx }, { "gemv", gemv }, { "ger", ger }, { "gemm", gemm }, { "gesv", gesv }, { "gels", gels }, { "inv", inv }, { "det", det }, { "cov", cov }, { "corr", corr }, { NULL, NULL } }; /* register functions */ #if LUA_VERSION_NUM >= 502 luaL_newlib(L, FUNCTIONS); #else luaL_register(L, luaL_checkstring(L, 1), FUNCTIONS); #endif /* vector metatable */ luaL_newmetatable(L, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, vector_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, vector_newindex); lua_setfield(L, -2, "__newindex"); lua_pushcfunction(L, vector_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, vector_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, vector_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); /* matrix metatable */ luaL_newmetatable(L, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, matrix_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, matrix_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, matrix_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, matrix_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); return 1; }
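/*
 * Editor's aside: the det() binding above never forms the determinant
 * directly. It LU-factorizes a scratch copy with LAPACKE_sgetrf (PA = LU),
 * multiplies the diagonal of U, and flips the sign once per row interchange
 * recorded in ipiv. Below is a minimal standalone sketch of that technique;
 * the helper name det_from_lu, the fixed-size pivot buffer, and the 3x3 test
 * matrix are assumptions of this sketch, not part of the binding.
 */
#include <stdio.h>
#include <lapacke.h>

/* determinant of an n x n row-major matrix; a is clobbered by the LU factors */
static float det_from_lu(float *a, lapack_int n)
{
    lapack_int ipiv[16]; /* sketch assumption: n <= 16 */
    float det;
    lapack_int i;

    if (LAPACKE_sgetrf(LAPACK_ROW_MAJOR, n, n, a, n, ipiv) != 0) {
        return 0.0f; /* singular input (or illegal argument) */
    }
    det = 1.0f;
    for (i = 0; i < n; i++) {
        det *= a[i * n + i]; /* diagonal of U carries the magnitude */
        if (ipiv[i] != i + 1) {
            det = -det; /* each row swap negates the determinant */
        }
    }
    return det;
}

int main(void)
{
    float a[9] = {2, 0, 0, 0, 3, 0, 0, 0, 4}; /* diagonal matrix, det = 24 */
    printf("%g\n", det_from_lu(a, 3));
    return 0;
}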
#include "linear.h" #include <assert.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <lauxlib.h> #include <cblas.h> #include <lapacke.h> /* matrix orders */ static const char *const ORDERS[] = {"row", "col", NULL}; /* checks an order */ static CBLAS_ORDER checkorder(lua_State * L, int index) { switch (luaL_checkoption(L, index, "row", ORDERS)) { case 0: return CblasRowMajor; case 1: return CblasColMajor; } /* not reached */ assert(0); return (CBLAS_ORDER) 0; } /* checks a transpose */ static CBLAS_TRANSPOSE checktranspose(lua_State * L, int index) { static const char *const TRANSPOSES[] = {"notrans", "trans", NULL}; switch (luaL_checkoption(L, index, "notrans", TRANSPOSES)) { case 0: return CblasNoTrans; case 1: return CblasTrans; } /* not reached */ assert(0); return (CBLAS_TRANSPOSE) 0; } /* translates a transpose for LAPACK */ static char lapacktranspose(CBLAS_TRANSPOSE transpose) { switch (transpose) { case CblasNoTrans: return 'N'; case CblasTrans: return 'T'; default: /* not reached */ assert(0); return '\0'; } } /* returns an int value from a table */ static int intvalue(lua_State * L, const char *key, int dfl) { int result, isinteger; lua_getfield(L, -1, key); if (!lua_isnil(L, -1)) { result = lua_tointegerx(L, -1, &isinteger); if (!isinteger) { luaL_error(L, "bad field " LUA_QS, key); } } else { if (dfl < 0) { luaL_error(L, "missing field " LUA_QS, key); } result = dfl; } lua_pop(L, 1); return result; } /* returns an option value from a table */ static int optionvalue(lua_State * L, const char *key, const char *dfl, const char *options[]) { const char *str; int i; lua_getfield(L, -1, key); if (!lua_isnil(L, -1)) { str = lua_tostring(L, -1); if (str == NULL) { luaL_error(L, "bad field " LUA_QS, key); } } else { if (dfl == NULL) { luaL_error(L, "missing field " LUA_QS, key); } str = dfl; } lua_pop(L, 1); for (i = 0; options[i] != NULL; i++) { if (strcmp(options[i], str) == 0) { return i; } } luaL_error(L, "bad option " LUA_QS " in field " LUA_QS, str, key); return 0; /* not reached */ } /* raises a linear argument error */ static int argerror(lua_State * L, int index) { return luaL_argerror(L, index, lua_pushfstring(L, "vector, or matrix " "expected, got %s", luaL_typename(L, index))); } /* pushes a new vector onto the stack */ static struct vector * newvector(lua_State * L, int size) { return lualinear_newvector(L, size); } /* pushes an existing vector onto the stack */ static struct vector * wrapvector(lua_State * L, int size, float *values) { return lualinear_wrapvector(L, size, values); } /* creates a new vector */ static int vector(lua_State * L) { int size; /* process arguments */ size = luaL_checkinteger(L, 1); luaL_argcheck(L, size >= 1, 1, "bad dimension"); /* create */ newvector(L, size); return 1; } /* vector length implementation */ static int vector_len(lua_State * L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushinteger(L, x->size); return 1; } /* vector index implementation */ static int vector_index(lua_State * L) { struct vector *x; int index; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index"); lua_pushnumber(L, x->values[(size_t) (index - 1) * x->inc]); return 1; } /* matrix vector newindex implementation */ static int vector_newindex(lua_State * L) { struct vector *x; int index; float value; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); 
luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index"); value = luaL_checknumber(L, 3); x->values[(size_t) (index - 1) * x->inc] = value; return 0; } /* vector next function */ static int vector_next(lua_State * L) { struct vector *x; int index; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); if (index >= 0 && index < x->size) { lua_pushinteger(L, index + 1); lua_pushnumber(L, x->values[(size_t) index]); return 2; } lua_pushnil(L); return 1; } /* vector ipairs function */ static int vector_ipairs(lua_State * L) { luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_next); lua_pushvalue(L, 1); lua_pushinteger(L, 0); return 3; } /* returns the string representation of a vector */ static int vector_tostring(lua_State * L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushfstring(L, "vector: %p", x); return 1; } /* frees a vector */ static int vector_free(lua_State * L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x->ref == LUA_NOREF) { free(x->values); } else { luaL_unref(L, LUA_REGISTRYINDEX, x->ref); } return 0; } /* pushes a new matrix onto the stack */ static struct matrix * newmatrix(lua_State * L, int rows, int cols, CBLAS_ORDER order) { return lualinear_newmatrix(L, rows, cols, order); } /* pushes an existing matrix onto the stack */ static struct matrix * wrapmatrix(lua_State * L, int rows, int cols, CBLAS_ORDER order, float *values) { return lualinear_wrapmatrix(L, rows, cols, order, values); } /* creates a new matrix */ static int matrix(lua_State * L) { int rows, cols; CBLAS_ORDER order; /* process arguments */ rows = luaL_checkinteger(L, 1); luaL_argcheck(L, rows >= 1, 1, "bad dimension"); cols = luaL_checkinteger(L, 2); luaL_argcheck(L, cols >= 1, 2, "bad dimension"); order = checkorder(L, 3); /* create */ newmatrix(L, rows, cols, order); return 1; } /* returns the length of a matrix */ static int matrix_len(lua_State * L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); switch (X->order) { case CblasRowMajor: lua_pushinteger(L, X->rows); break; case CblasColMajor: lua_pushinteger(L, X->cols); break; } return 1; } /* matrix index implementation */ static int matrix_index(lua_State * L) { struct matrix *X; int index, size; struct vector *x; /* process arguments */ X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1, 2, "bad index"); switch (X->order) { case CblasRowMajor: luaL_argcheck(L, index <= X->rows, 2, "bad index"); size = X->cols; break; case CblasColMajor: luaL_argcheck(L, index <= X->cols, 2, "bad index"); size = X->rows; break; default: /* not reached */ size = -1; assert(0); } /* create vector */ x = wrapvector(L, size, &X->values[(size_t) (index - 1) * X->ld]); lua_pushvalue(L, 1); x->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } /* matrix next function */ static int matrix_next(lua_State * L) { struct matrix *X; int index, majorsize, minorsize; struct vector *x; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); switch (X->order) { case CblasRowMajor: majorsize = X->rows; minorsize = X->cols; break; case CblasColMajor: majorsize = X->cols; minorsize = X->rows; break; default: /* not reached */ assert(0); return 0; } if (index >= 0 && index < majorsize) { lua_pushinteger(L, index + 1); x = wrapvector(L, minorsize, &X->values[(size_t) index * X->ld]); lua_pushvalue(L, 1); x->ref = luaL_ref(L, 
LUA_REGISTRYINDEX); return 2; } lua_pushnil(L); return 1; } /* matrix ipairs function */ static int matrix_ipairs(lua_State * L) { luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_next); lua_pushvalue(L, 1); lua_pushinteger(L, 0); return 3; } /* returns the string representation of a matrix */ static int matrix_tostring(lua_State * L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); lua_pushfstring(L, "matrix: %p", X); return 1; } /* frees a matrix */ static int matrix_free(lua_State * L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X->ref == LUA_NOREF) { free(X->values); } else { luaL_unref(L, LUA_REGISTRYINDEX, X->ref); } return 0; } /* returns the type of a linear object */ static int type(lua_State * L) { if (luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE) != NULL) { lua_pushliteral(L, "vector"); return 1; } if (luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE) != NULL) { lua_pushliteral(L, "matrix"); return 1; } lua_pushnil(L); return 1; } /* returns the size of a linear object */ static int size(lua_State * L) { struct vector *x; struct matrix *X; x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_pushinteger(L, x->size); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { lua_pushinteger(L, X->rows); lua_pushinteger(L, X->cols); lua_pushstring(L, ORDERS[X->order == CblasRowMajor ? 0 : 1]); return 3; } return argerror(L, 1); } /* transposed vector */ static int tvector(lua_State * L) { struct matrix *X; int index, size; struct vector *x; /* process arguments */ X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1, 2, "bad index"); switch (X->order) { case CblasRowMajor: luaL_argcheck(L, index <= X->cols, 2, "bad index"); size = X->rows; break; case CblasColMajor: luaL_argcheck(L, index <= X->rows, 2, "bad index"); size = X->cols; break; default: /* not reached */ size = -1; assert(0); } /* create vector */ x = wrapvector(L, size, &X->values[index - 1]); x->inc = X->ld; lua_pushvalue(L, 1); x->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } /* subvector or submatrix */ static int sub(lua_State * L) { struct vector *x, *s; struct matrix *X, *S; /* process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { int start, end; start = luaL_optinteger(L, 2, 1); luaL_argcheck(L, start >= 1 && start <= x->size, 2, "bad index"); end = luaL_optinteger(L, 3, x->size); luaL_argcheck(L, end >= start && end <= x->size, 3, "bad index"); s = wrapvector(L, end - start + 1, &x->values[ (size_t) (start - 1) * x->inc]); s->inc = x->inc; lua_pushvalue(L, 1); s->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { int rowstart, rowend, colstart, colend; switch (X->order) { case CblasRowMajor: rowstart = luaL_optinteger(L, 2, 1); luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows, 2, "bad index"); colstart = luaL_optinteger(L, 3, 1); luaL_argcheck(L, colstart >= 1 && colstart <= X->cols, 3, "bad index"); rowend = luaL_optinteger(L, 4, X->rows); luaL_argcheck(L, rowend >= rowstart && rowend <= X->rows, 4, "bad index"); colend = luaL_optinteger(L, 5, X->cols); luaL_argcheck(L, colend >= colstart && colend <= X->cols, 5, "bad index"); S = wrapmatrix(L, rowend - rowstart + 1, colend - colstart + 1, X->order, &X->values[ (size_t) (rowstart - 1) * X->ld + colstart - 1]); break; case CblasColMajor: colstart 
= luaL_optinteger(L, 2, 1); luaL_argcheck(L, colstart >= 1 && colstart <= X->cols, 2, "bad index"); rowstart = luaL_optinteger(L, 3, 1); luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows, 3, "bad index"); colend = luaL_optinteger(L, 4, X->cols); luaL_argcheck(L, colend >= colstart && colend <= X->cols, 4, "bad index"); rowend = luaL_optinteger(L, 5, X->rows); luaL_argcheck(L, rowend >= rowstart && rowend <= X->rows, 5, "bad index"); S = wrapmatrix(L, rowend - rowstart + 1, colend - colstart + 1, X->order, &X->values[ (size_t) (colstart - 1) * X->ld + rowstart - 1]); break; default: /* not reached */ assert(0); return 0; } S->ld = X->ld; lua_pushvalue(L, 1); S->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } return argerror(L, 1); } /* unwinds matrices into a vector */ static int unwind(lua_State * L) { struct vector *x; int index, i, j, k; size_t base; struct matrix *X; if (lua_gettop(L) == 0) { return luaL_error(L, "wrong number of arguments"); } x = luaL_checkudata(L, lua_gettop(L), LUALINEAR_VECTOR_METATABLE); index = 1; i = 0; while (i < x->size) { X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->rows * X->cols <= x->size - i, index, "matrix too large"); switch (X->order) { case CblasRowMajor: for (j = 0; j < X->rows; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->cols; k++) { x->values[(size_t) i * x->inc] = X->values[base + k]; i++; } } break; case CblasColMajor: for (j = 0; j < X->cols; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->rows; k++) { x->values[(size_t) i * x->inc] = X->values[base + k]; i++; } } break; } index++; } return 0; } /* reshapes a vector into matrices */ static int reshape(lua_State * L) { struct vector *x; int index, i, j, k; size_t base; struct matrix *X; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = 2; i = 0; while (i < x->size) { X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->rows * X->cols <= x->size - i, index, "matrix too large"); switch (X->order) { case CblasRowMajor: for (j = 0; j < X->rows; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->cols; k++) { X->values[base + k] = x->values[ (size_t) i * x->inc]; i++; } } break; case CblasColMajor: for (j = 0; j < X->cols; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->rows; k++) { X->values[base + k] = x->values[ (size_t) i * x->inc]; i++; } } break; } index++; } return 0; } /* converts a vector or matrix to a table */ static int totable(lua_State * L) { struct vector *x; struct matrix *X; int i, j; const float *value; /* check and process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_createtable(L, 0, 3); lua_pushliteral(L, "vector"); lua_setfield(L, -2, "type"); lua_pushinteger(L, x->size); lua_setfield(L, -2, "length"); lua_createtable(L, x->size, 0); value = x->values; for (i = 0; i < x->size; i++) { lua_pushnumber(L, *value); lua_rawseti(L, -2, i + 1); value += x->inc; } lua_setfield(L, -2, "values"); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { lua_createtable(L, 0, 5); lua_pushliteral(L, "matrix"); lua_setfield(L, -2, "type"); lua_pushinteger(L, X->rows); lua_setfield(L, -2, "rows"); lua_pushinteger(L, X->cols); lua_setfield(L, -2, "cols"); switch (X->order) { case CblasRowMajor: lua_pushliteral(L, "rowmajor"); lua_setfield(L, -2, "order"); lua_createtable(L, X->rows, 0); for (i = 0; i < X->rows; i++) { lua_createtable(L, X->cols, 0); value = &X->values[(size_t) i * X->ld]; for (j = 0; j < X->cols; j++) { 
lua_pushnumber(L, *value++); lua_rawseti(L, -2, j + 1); } lua_rawseti(L, -2, i + 1); } lua_setfield(L, -2, "values"); break; case CblasColMajor: lua_pushliteral(L, "colmajor"); lua_setfield(L, -2, "order"); lua_createtable(L, X->cols, 0); for (i = 0; i < X->cols; i++) { lua_createtable(L, X->rows, 0); value = &X->values[(size_t) i * X->ld]; for (j = 0; j < X->rows; j++) { lua_pushnumber(L, *value++); lua_rawseti(L, -2, j + 1); } lua_rawseti(L, -2, i + 1); } lua_setfield(L, -2, "values"); break; } return 1; } return argerror(L, 1); } /* converts a table to a vector or matrix */ static int tolinear(lua_State * L) { static const char *types[] = {"vector", "matrix", NULL}; static const char *orders[] = {"rowmajor", "colmajor", NULL}; struct vector *x; struct matrix *X; int size, rows, cols, major, minor; CBLAS_ORDER order; int i, j; int isnum; float *value; /* check arguments */ luaL_checktype(L, 1, LUA_TTABLE); lua_settop(L, 1); /* handle types */ switch (optionvalue(L, "type", NULL, types)) { case 0: /* vector */ size = intvalue(L, "length", -1); if (size < 1) { return luaL_error(L, "bad field " LUA_QS, "length"); } x = newvector(L, size); lua_getfield(L, 1, "values"); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad field " LUA_QS, "values"); } value = x->values; for (i = 0; i < size; i++) { lua_rawgeti(L, -1, i + 1); *value++ = lua_tonumberx(L, -1, &isnum); if (!isnum) { return luaL_error(L, "bad value at index %d", i + 1); } lua_pop(L, 1); } lua_pop(L, 1); return 1; case 1: /* matrix */ rows = intvalue(L, "rows", -1); if (rows < 1) { return luaL_error(L, "bad field " LUA_QS, "rows"); } cols = intvalue(L, "cols", -1); if (cols < 1) { return luaL_error(L, "bad field " LUA_QS, "cols"); } switch (optionvalue(L, "order", NULL, orders)) { case 0: order = CblasRowMajor; major = rows; minor = cols; break; case 1: order = CblasColMajor; major = cols; minor = rows; break; default: /* not reached */ assert(0); return 0; } X = newmatrix(L, rows, cols, order); lua_getfield(L, 1, "values"); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad field " LUA_QS, "values"); } for (i = 0; i < major; i++) { value = &X->values[i * X->ld]; lua_rawgeti(L, -1, i + 1); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad value at index %d", i + 1); } for (j = 0; j < minor; j++) { lua_rawgeti(L, -1, j + 1); *value++ = lua_tonumberx(L, -1, &isnum); if (!isnum) { return luaL_error(L, "bad value at " "index (%d,%d)", i + 1, j + 1); } lua_pop(L, 1); } lua_pop(L, 1); } lua_pop(L, 1); return 1; } /* not reached */ assert(0); return 0; } /* invokes the DOT subprogram (x' y) */ static int dot(lua_State * L) { struct vector *x, *y; float dot; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch"); /* invoke subprogram */ dot = cblas_sdot(x->size, x->values, x->inc, y->values, y->inc); lua_pushnumber(L, dot); return 1; } /* invokes the NRM2 subprogram (||x||_2) */ static int nrm2(lua_State * L) { struct vector *x; float nrm2; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ nrm2 = cblas_snrm2(x->size, x->values, x->inc); lua_pushnumber(L, nrm2); return 1; } /* invokes the ASUM subprogram (sigma |x|) */ static int asum(lua_State * L) { struct vector *x; float asum; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ asum =
cblas_sasum(x->size, x->values, x->inc); lua_pushnumber(L, asum); return 1; } /* invokes the IAMAX subprogram (argmax |x|) */ static int iamax(lua_State * L) { struct vector *x; int iamax; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ iamax = cblas_isamax(x->size, x->values, x->inc); lua_pushinteger(L, iamax + 1); return 1; } /* sum implementation */ static float _sum(const float *values, int size, int inc) { float sum; int i; sum = 0.0; for (i = 0; i < size; i++) { sum += values[(size_t) i * inc]; } return sum; } /* sum implementation (sigma x_i) */ static int sum(lua_State * L) { struct vector *x, *y; struct matrix *X; int i; /* check and process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_pushnumber(L, _sum(x->values, x->size, x->inc)); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); switch (checktranspose(L, 3)) { case CblasNoTrans: switch (X->order) { case CblasRowMajor: luaL_argcheck(L, y->size == X->rows, 2, "dimension mismatch"); for (i = 0; i < X->rows; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i * X->ld], X->cols, 1); } break; case CblasColMajor: luaL_argcheck(L, y->size == X->cols, 2, "dimension mismatch"); for (i = 0; i < X->cols; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i * X->ld], X->rows, 1); } break; } break; case CblasTrans: switch (X->order) { case CblasRowMajor: luaL_argcheck(L, y->size == X->cols, 2, "dimension mismatch"); for (i = 0; i < X->cols; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i], X->rows, X->ld); } break; case CblasColMajor: luaL_argcheck(L, y->size == X->rows, 2, "dimension mismatch"); for (i = 0; i < X->rows; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i], X->cols, X->ld); } break; } break; default: /* not reached */ assert(0); break; } return 0; } return argerror(L, 1); } /* xy function */ typedef void (*xyfunction) (int, float *, int, float *, int, float); /* invokes an (x,y) subprogram */ static int xy(lua_State * L, xyfunction s, int hasy, int hasalpha) { int index, i; float alpha; struct vector *x, *y; struct matrix *X, *Y; /* check and process arguments */ index = 2; x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { if (hasy) { y = luaL_testudata(L, 2, LUALINEAR_VECTOR_METATABLE); Y = luaL_testudata(L, 2, LUALINEAR_MATRIX_METATABLE); if (y == NULL && Y == NULL) { return argerror(L, 2); } index++; } else { y = x; Y = NULL; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } if (y != NULL) { /* invoke subprogram on vector-vector */ luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch"); s(x->size, x->values, x->inc, y->values, y->inc, alpha); return 0; } /* invoke subprogram on vector-matrix */ switch (checktranspose(L, index)) { case CblasNoTrans: switch (Y->order) { case CblasRowMajor: luaL_argcheck(L, x->size == Y->cols, 1, "dimension mismatch"); for (i = 0; i < Y->rows; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; case CblasColMajor: luaL_argcheck(L, x->size == Y->rows, 1, "dimension mismatch"); for (i = 0; i < Y->cols; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; } break; case CblasTrans: switch (Y->order) { case CblasRowMajor: luaL_argcheck(L, x->size == Y->rows, 1, "dimension mismatch"); for (i = 0; i < Y->cols; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i], Y->ld, alpha); } break; case CblasColMajor: luaL_argcheck(L, x->size == Y->cols, 1, "dimension mismatch"); for (i = 0; i < Y->rows; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i], Y->ld, alpha); } break; } break; default: /* not reached */ assert(0); } return 0; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { if (hasy) { Y = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->order == Y->order, 2, "order mismatch"); luaL_argcheck(L, X->rows == Y->rows && X->cols == Y->cols, 2, "dimension mismatch"); index++; } else { Y = X; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } /* invoke subprogram on matrix-matrix */ switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { s(X->cols, &X->values[(size_t) i * X->ld], 1, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { s(X->rows, &X->values[(size_t) i * X->ld], 1, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; } return 0; } return argerror(L, 1); } /* wraps the SWAP subprogram */ static void _swap(int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_sswap(size, x, incx, y, incy); } /* invokes the SWAP subprogram (y <-> x) */ static int swap(lua_State * L) { return xy(L, _swap, 1, 0); } /* wraps the COPY subprogram */ static void _copy(int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_scopy(size, x, incx, y, incy); } /* invokes the COPY subprogram (y <- x) */ static int copy(lua_State * L) { return xy(L, _copy, 1, 0); } /* wraps the AXPY subprogram */ static void _axpy(int size, float *x, int incx, float *y, int incy, float alpha) { cblas_saxpy(size, alpha, x, incx, y, incy); } /* invokes the AXPY subprogram (y <- alpha x + y) */ static int axpy(lua_State * L) { return xy(L, _axpy, 1, 1); } /* wraps the SCAL subprogram */ static void _scal(int size, float *x, int incx, float *y, int incy, float alpha) { (void)y; (void)incy; cblas_sscal(size, alpha, x, incx); } /* invokes the SCAL subprogram (x <- alpha x) */ static int scal(lua_State * L) { return xy(L, _scal, 0, 1); } /* set operation implementation */ static void _set(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; for (i = 0; i < size; i++) { x[(size_t) i * incx] = alpha; } } /* performs a set operation (x <- alpha) */ static int set(lua_State * L) { return xy(L, _set, 0, 1); } /* uniform RNG implementation */ static void _uniform(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; (void)alpha; for (i = 0; i < size; i++) { *x = (float)random() * (1.0 / ((float)RAND_MAX + 1.0)); x += incx; } } /* performs a uniform operation (x <- uniform) */ static int uniform(lua_State * L) { return xy(L, _uniform, 0, 0); } /* normal RNG implementation */ static void _normal(int size, float *x, int incx, float *y, int incy, float alpha) { int i; float u1, u2, r, s, c; (void)y; (void)incy; (void)alpha; for (i = 0; i < size - 1; i += 2) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= 0.0); r = sqrt(-2.0 * logf(u1)); sincosf(2 * M_PI * u2, &s, &c); *x = r * c; x += incx; *x = r * s; x += incx; } if (i < size) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 =
(float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= 0.0); *x = sqrtf(-2.0 * logf(u1)) * cosf(2 * M_PI * u2); x += incx; } } /* performs a normal operation (x <- normal) */ static int normal(lua_State * L) { return xy(L, _normal, 0, 0); } /* inc operation implementation */ static void _inc(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; for (i = 0; i < size; i++) { x[(size_t) i * incx] += alpha; } } /* performs an inc operation (x <- x + alpha) */ static int inc(lua_State * L) { return xy(L, _inc, 0, 1); } /* element-wise multiplication implementation, alpha = 1 */ static void _mul1(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; for (i = 0; i < size; i++) { y[(size_t) i * incy] *= x[(size_t) i * incx]; } } /* element-wise multiplication implementation, alpha = -1 */ static void _mulm1(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; for (i = 0; i < size; i++) { y[(size_t) i * incy] /= x[(size_t) i * incx]; } } /* element-wise multiplication implementation, alpha = any */ static void _mul(int size, float *x, int incx, float *y, int incy, float alpha) { int i; for (i = 0; i < size; i++) { y[(size_t) i * incy] *= pow(x[(size_t) i * incx], alpha); } } /* performs element-wise multiplication (y <- x^alpha .* y) */ static int mul(lua_State * L) { float alpha; alpha = luaL_optnumber(L, 3, 1.0); if (alpha == 1.0) { return xy(L, _mul1, 1, 1); } if (alpha == -1.0) { return xy(L, _mulm1, 1, 1); } return xy(L, _mul, 1, 1); } /* power raising operation implementation */ static void _pow(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; for (i = 0; i < size; i++) { x[(size_t) i * incx] = pow(x[(size_t) i * incx], alpha); } } /* performs element-wise power raising (x <- x^alpha) */ static int powx(lua_State * L) { return xy(L, _pow, 0, 1); } /* apply function */ typedef float (*applyfunction) (float); /* applies a function to a value */ static int apply(lua_State * L, applyfunction apply, int parallel) { struct vector *x; struct matrix *X; int i, j; size_t base; (void)parallel; if (lua_type(L, 1) == LUA_TNUMBER) { lua_pushnumber(L, apply(lua_tonumber(L, 1))); return 1; } x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { for (i = 0; i < x->size; i++) { x->values[(size_t) i * x->inc] = apply(x->values[(size_t) i * x->inc]); } return 0; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { base = (size_t) i *X->ld; for (j = 0; j < X->cols; j++) { X->values[base + j] = apply( X->values[base + j]); } } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { base = (size_t) i *X->ld; for (j = 0; j < X->rows; j++) { X->values[base + j] = apply( X->values[base + j]); } } break; } return 0; } return luaL_argerror(L, 1, lua_pushfstring(L, "number, vector, or " "matrix expected, got %s", luaL_typename(L, 1))); } /* sign function implementation */ static float _sign(float x) { if (x > 0) { return 1; } if (x < 0) { return -1; } return x; } /* sign function */ static int sign(lua_State * L) { return
apply(L, _sign, 1); } /* abs function implementation */ static float _abs(float x) { return fabs(x); } /* abs function */ static int absx(lua_State * L) { return apply(L, _abs, 1); } /* exp function */ static int expx(lua_State * L) { return apply(L, expf, 1); } /* log function */ static int logx(lua_State * L) { return apply(L, logf, 1); } /* logistic function implementation */ static float _logistic(float z) { return 1.0 / (1.0 + expf(-z)); } /* logistic function */ static int logistic(lua_State * L) { return apply(L, _logistic, 1); } /* tanh function */ static int tanhx(lua_State * L) { return apply(L, tanhf, 1); } /* softplus function implementation */ static float _softplus(float x) { return logf(1 + expf(x)); } /* softplus function */ static int softplus(lua_State * L) { return apply(L, _softplus, 1); } /* rectifier function implementation */ static float _rectifier(float x) { return x > 0.0 ? x : 0.0; } /* rectifier function */ static int rectifier(lua_State * L) { return apply(L, _rectifier, 1); } /* current Lua state */ static __thread lua_State *TL; /* apply function implementation */ static float _apply(float x) { float result; lua_pushvalue(TL, -1); lua_pushnumber(TL, x); lua_call(TL, 1, 1); result = lua_tonumber(TL, -1); lua_pop(TL, 1); return result; } /* apply function */ static int applyx(lua_State * L) { luaL_checktype(L, 2, LUA_TFUNCTION); lua_settop(L, 2); TL = L; return apply(L, _apply, 0); } /* invokes the GEMV subprogram (y <- alpha A x + beta y) */ static int gemv(lua_State * L) { struct matrix *A; struct vector *x, *y; float alpha, beta; CBLAS_TRANSPOSE ta; int m, n; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); x = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 3, LUALINEAR_VECTOR_METATABLE); alpha = luaL_optnumber(L, 4, 1.0); beta = luaL_optnumber(L, 5, 0.0); ta = checktranspose(L, 6); m = ta == CblasNoTrans ? A->rows : A->cols; n = ta == CblasNoTrans ? A->cols : A->rows; luaL_argcheck(L, x->size == n, 2, "dimension mismatch"); luaL_argcheck(L, y->size == m, 3, "dimension mismatch"); /* invoke subprogram */ cblas_sgemv(A->order, ta, A->rows, A->cols, alpha, A->values, A->ld, x->values, x->inc, beta, y->values, y->inc); return 0; } /* invokes the GER subprogram (A <- alpha x y' + A) */ static int ger(lua_State * L) { struct vector *x, *y; struct matrix *A; float alpha; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); A = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE); alpha = luaL_optnumber(L, 4, 1.0); luaL_argcheck(L, x->size == A->rows, 1, "dimension mismatch"); luaL_argcheck(L, y->size == A->cols, 2, "dimension mismatch"); /* invoke subprogram */ cblas_sger(A->order, A->rows, A->cols, alpha, x->values, x->inc, y->values, y->inc, A->values, A->ld); return 0; } /* invokes the GEMM subprogram (C <- alpha A B + beta C) */ static int gemm(lua_State * L) { struct matrix *A, *B, *C; float alpha, beta; CBLAS_TRANSPOSE ta, tb; int m, n, ka, kb; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); C = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, C->order == A->order, 3, "order mismatch"); alpha = luaL_optnumber(L, 4, 1.0); beta = luaL_optnumber(L, 5, 0.0); ta = checktranspose(L, 6); tb = checktranspose(L, 7); m = ta == CblasNoTrans ?
A->rows : A->cols; n = tb == CblasNoTrans ? B->cols : B->rows; ka = ta == CblasNoTrans ? A->cols : A->rows; kb = tb == CblasNoTrans ? B->rows : B->cols; luaL_argcheck(L, ka == kb, 2, "dimension mismatch"); /* invoke subprogram */ cblas_sgemm(A->order, ta, tb, m, n, ka, alpha, A->values, A->ld, B->values, B->ld, beta, C->values, C->ld); return 0; } /* invokes the GESV subprogram */ static int gesv(lua_State * L) { struct matrix *A, *B; int *ipiv, result; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); luaL_argcheck(L, B->rows == A->rows, 2, "dimension mismatch"); /* invoke subprogram */ ipiv = calloc(A->rows, sizeof(lapack_int)); if (ipiv == NULL) { return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgesv(A->order, A->rows, B->cols, A->values, A->ld, ipiv, B->values, B->ld); free(ipiv); lua_pushinteger(L, result); return 1; } /* invokes the GELS subprogram */ static int gels(lua_State * L) { struct matrix *A, *B; char ta; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); ta = lapacktranspose(checktranspose(L, 3)); luaL_argcheck(L, B->rows == (A->rows >= A->cols ? A->rows : A->cols), 2, "dimension mismatch"); /* invoke subprogram */ lua_pushinteger(L, LAPACKE_sgels(A->order, ta, A->rows, A->cols, B->cols, A->values, A->ld, B->values, B->ld)); return 1; } /* calculates the inverse of a matrix */ static int inv(lua_State * L) { struct matrix *A; int *ipiv, result; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); /* invoke subprograms */ ipiv = calloc(A->rows, sizeof(lapack_int)); if (ipiv == NULL) { return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgetrf(A->order, A->rows, A->cols, A->values, A->ld, ipiv); if (result != 0) { free(ipiv); lua_pushinteger(L, result); return 1; } result = LAPACKE_sgetri(A->order, A->rows, A->values, A->ld, ipiv); free(ipiv); lua_pushinteger(L, result); return 1; } /* calculates the determinant of a matrix */ static int det(lua_State * L) { struct matrix *A; float *copy, *d, *s, det; int n, *ipiv, result, neg, i; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); n = A->rows; /* copy matrix */ copy = calloc((size_t) n * n, sizeof(float)); if (copy == NULL) { return luaL_error(L, "cannot allocate values"); } d = copy; s = A->values; for (i = 0; i < n; i++) { memcpy(d, s, (size_t) n * sizeof(float)); d += n; s += A->ld; } /* invoke subprograms */ ipiv = calloc(n, sizeof(lapack_int)); if (ipiv == NULL) { free(copy); return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgetrf(A->order, n, n, copy, n, ipiv); if (result != 0) { free(copy); free(ipiv); lua_pushnumber(L, 0.0); return 1; } /* calculate determinant */ det = 1.0; neg = 0; for (i = 0; i < n; i++) { det *= copy[(size_t) i * n + i]; if (ipiv[i] != i + 1) { neg = !neg; } } free(copy); free(ipiv); lua_pushnumber(L, neg ?
-det : det); return 1; } /* calculates the covariance of a matrix */ static int cov(lua_State * L) { struct matrix *A, *B; int ddof, i, j, k; float *means, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); ddof = luaL_optinteger(L, 3, 0); luaL_argcheck(L, ddof >= 0 && ddof < A->rows, 3, "bad ddof"); /* calculate means */ means = calloc((size_t) A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t) i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; } break; } /* calculate covariance */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (A->rows - ddof); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t) i * A->ld]; vj = &A->values[(size_t) j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (A->rows - ddof); } } break; } free(means); return 0; } /* calculates the correlation of a matrix */ static int corr(lua_State * L) { struct matrix *A, *B; int i, j, k; float *means, *stds, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); /* calculate means and stds */ means = calloc((size_t) A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } stds = calloc((size_t) A->cols, sizeof(float)); if (stds == NULL) { free(means); return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v += A->ld; } stds[i] = sqrt(sum); } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t) i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[(size_t) i * A->ld]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v++; } stds[i] = sqrt(sum); } break; } /* calculate
correlation */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t) i * A->ld]; vj = &A->values[(size_t) j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; } free(means); free(stds); return 0; } /* * Exported functions. */ int luaopen_linear(lua_State * L) { static const luaL_Reg FUNCTIONS[] = { {"vector", vector}, {"matrix", matrix}, {"type", type}, {"size", size}, {"tvector", tvector}, {"sub", sub}, {"unwind", unwind}, {"reshape", reshape}, {"totable", totable}, {"tolinear", tolinear}, {"dot", dot}, {"nrm2", nrm2}, {"asum", asum}, {"iamax", iamax}, {"sum", sum}, {"swap", swap}, {"copy", copy}, {"axpy", axpy}, {"scal", scal}, {"set", set}, {"uniform", uniform}, {"normal", normal}, {"inc", inc}, {"mul", mul}, {"pow", powx}, {"sign", sign}, {"abs", absx}, {"exp", expx}, {"log", logx}, {"logistic", logistic}, {"tanh", tanhx}, {"softplus", softplus}, {"rectifier", rectifier}, {"apply", applyx}, {"gemv", gemv}, {"ger", ger}, {"gemm", gemm}, {"gesv", gesv}, {"gels", gels}, {"inv", inv}, {"det", det}, {"cov", cov}, {"corr", corr}, {NULL, NULL} }; /* register functions */ #if LUA_VERSION_NUM >= 502 luaL_newlib(L, FUNCTIONS); #else luaL_register(L, luaL_checkstring(L, 1), FUNCTIONS); #endif /* vector metatable */ luaL_newmetatable(L, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, vector_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, vector_newindex); lua_setfield(L, -2, "__newindex"); lua_pushcfunction(L, vector_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, vector_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, vector_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); /* matrix metatable */ luaL_newmetatable(L, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, matrix_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, matrix_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, matrix_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, matrix_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); return 1; }
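/*
 * Editor's aside: cov() above fills only the upper triangle of the output
 * for each column pair and mirrors it, since a covariance matrix is
 * symmetric; each entry is sum((a_ki - mean_i) * (a_kj - mean_j)) / (rows -
 * ddof). A minimal standalone sketch of that technique for a row-major
 * rows x cols matrix follows; the function name cov_sketch, the fixed-size
 * means buffer, and the 3x2 test data are assumptions of this sketch.
 */
#include <stdio.h>

static void cov_sketch(const float *a, int rows, int cols, int ddof, float *b)
{
    int i, j, k;
    float means[8]; /* sketch assumption: cols <= 8 */

    /* column means */
    for (j = 0; j < cols; j++) {
        means[j] = 0.0f;
        for (k = 0; k < rows; k++) {
            means[j] += a[k * cols + j];
        }
        means[j] /= rows;
    }
    /* upper triangle only, mirrored into the lower triangle */
    for (i = 0; i < cols; i++) {
        for (j = i; j < cols; j++) {
            float sum = 0.0f;
            for (k = 0; k < rows; k++) {
                sum += (a[k * cols + i] - means[i]) * (a[k * cols + j] - means[j]);
            }
            b[i * cols + j] = b[j * cols + i] = sum / (rows - ddof);
        }
    }
}

int main(void)
{
    const float a[6] = {1, 2, 2, 4, 3, 6}; /* 3 x 2; column 2 = 2 * column 1 */
    float b[4];
    cov_sketch(a, 3, 2, 1, b); /* ddof = 1: sample covariance */
    printf("%g %g / %g %g\n", b[0], b[1], b[2], b[3]); /* 1 2 / 2 4 */
    return 0;
}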
#include "linear.h" #include <assert.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <lauxlib.h> #include <cblas.h> #include <lapacke.h> /* matrix orders */ static const char *const ORDERS[] = {"row", "col", NULL}; /* checks an order */ static CBLAS_ORDER checkorder(lua_State * L, int index) { switch (luaL_checkoption(L, index, "row", ORDERS)) { case 0: return CblasRowMajor; case 1: return CblasColMajor; } /* not reached */ assert(0); return (CBLAS_ORDER) 0; } /* checks a transpose */ static CBLAS_TRANSPOSE checktranspose(lua_State * L, int index) { static const char *const TRANSPOSES[] = {"notrans", "trans", NULL}; switch (luaL_checkoption(L, index, "notrans", TRANSPOSES)) { case 0: return CblasNoTrans; case 1: return CblasTrans; } /* not reached */ assert(0); return (CBLAS_TRANSPOSE) 0; } /* translates a transpose for LAPACK */ static char lapacktranspose(CBLAS_TRANSPOSE transpose) { switch (transpose) { case CblasNoTrans: return 'N'; case CblasTrans: return 'T'; default: /* not reached */ assert(0); return '\0'; } } /* returns an int value from a table */ static int intvalue(lua_State * L, const char *key, int dfl) { int result, isinteger; lua_getfield(L, -1, key); if (!lua_isnil(L, -1)) { result = lua_tointegerx(L, -1, &isinteger); if (!isinteger) { luaL_error(L, "bad field " LUA_QS, key); } } else { if (dfl < 0) { luaL_error(L, "missing field " LUA_QS, key); } result = dfl; } lua_pop(L, 1); return result; } /* returns an option value from a table */ static int optionvalue(lua_State * L, const char *key, const char *dfl, const char *options[]) { const char *str; int i; lua_getfield(L, -1, key); if (!lua_isnil(L, -1)) { str = lua_tostring(L, -1); if (str == NULL) { luaL_error(L, "bad field " LUA_QS, key); } } else { if (dfl == NULL) { luaL_error(L, "missing field " LUA_QS, key); } str = dfl; } lua_pop(L, 1); for (i = 0; options[i] != NULL; i++) { if (strcmp(options[i], str) == 0) { return i; } } luaL_error(L, "bad option " LUA_QS " in field " LUA_QS, str, key); return 0; /* not reached */ } /* raises a linear argument error */ static int argerror(lua_State * L, int index) { return luaL_argerror(L, index, lua_pushfstring(L, "vector, or matrix " "expected, got %s", luaL_typename(L, index))); } /* pushes a new vector onto the stack */ static struct vector * newvector(lua_State * L, int size) { return lualinear_newvector(L, size); } /* pushes an existing vector onto the stack */ static struct vector * wrapvector(lua_State * L, int size, float *values) { return lualinear_wrapvector(L, size, values); } /* creates a new vector */ static int vector(lua_State * L) { int size; /* process arguments */ size = luaL_checkinteger(L, 1); luaL_argcheck(L, size >= 1, 1, "bad dimension"); /* create */ newvector(L, size); return 1; } /* vector length implementation */ static int vector_len(lua_State * L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushinteger(L, x->size); return 1; } /* vector index implementation */ static int vector_index(lua_State * L) { struct vector *x; int index; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index"); lua_pushnumber(L, x->values[(size_t) (index - 1) * x->inc]); return 1; } /* matrix vector newindex implementation */ static int vector_newindex(lua_State * L) { struct vector *x; int index; float value; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); 
luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index"); value = luaL_checknumber(L, 3); x->values[(size_t) (index - 1) * x->inc] = value; return 0; } /* vector next function */ static int vector_next(lua_State * L) { struct vector *x; int index; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = luaL_checkinteger(L, 2); if (index >= 0 && index < x->size) { lua_pushinteger(L, index + 1); lua_pushnumber(L, x->values[(size_t) index]); return 2; } lua_pushnil(L); return 1; } /* vector ipairs function */ static int vector_ipairs(lua_State * L) { luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_next); lua_pushvalue(L, 1); lua_pushinteger(L, 0); return 3; } /* returns the string representation of a vector */ static int vector_tostring(lua_State * L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); lua_pushfstring(L, "vector: %p", x); return 1; } /* frees a vector */ static int vector_free(lua_State * L) { struct vector *x; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x->ref == LUA_NOREF) { free(x->values); } else { luaL_unref(L, LUA_REGISTRYINDEX, x->ref); } return 0; } /* pushes a new matrix onto the stack */ static struct matrix * newmatrix(lua_State * L, int rows, int cols, CBLAS_ORDER order) { return lualinear_newmatrix(L, rows, cols, order); } /* pushes an existing matrix onto the stack */ static struct matrix * wrapmatrix(lua_State * L, int rows, int cols, CBLAS_ORDER order, float *values) { return lualinear_wrapmatrix(L, rows, cols, order, values); } /* creates a new matrix */ static int matrix(lua_State * L) { int rows, cols; CBLAS_ORDER order; /* process arguments */ rows = luaL_checkinteger(L, 1); luaL_argcheck(L, rows >= 1, 1, "bad dimension"); cols = luaL_checkinteger(L, 2); luaL_argcheck(L, cols >= 1, 2, "bad dimension"); order = checkorder(L, 3); /* create */ newmatrix(L, rows, cols, order); return 1; } /* returns the length of a matrix */ static int matrix_len(lua_State * L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); switch (X->order) { case CblasRowMajor: lua_pushinteger(L, X->rows); break; case CblasColMajor: lua_pushinteger(L, X->cols); break; } return 1; } /* matrix index implementation */ static int matrix_index(lua_State * L) { struct matrix *X; int index, size; struct vector *x; /* process arguments */ X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1, 2, "bad index"); switch (X->order) { case CblasRowMajor: luaL_argcheck(L, index <= X->rows, 2, "bad index"); size = X->cols; break; case CblasColMajor: luaL_argcheck(L, index <= X->cols, 2, "bad index"); size = X->rows; break; default: /* not reached */ size = -1; assert(0); } /* create vector */ x = wrapvector(L, size, &X->values[(size_t) (index - 1) * X->ld]); lua_pushvalue(L, 1); x->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } /* matrix next function */ static int matrix_next(lua_State * L) { struct matrix *X; int index, majorsize, minorsize; struct vector *x; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); switch (X->order) { case CblasRowMajor: majorsize = X->rows; minorsize = X->cols; break; case CblasColMajor: majorsize = X->cols; minorsize = X->rows; break; default: /* not reached */ assert(0); return 0; } if (index >= 0 && index < majorsize) { lua_pushinteger(L, index + 1); x = wrapvector(L, minorsize, &X->values[(size_t) index * X->ld]); lua_pushvalue(L, 1); x->ref = luaL_ref(L, 
LUA_REGISTRYINDEX); return 2; } lua_pushnil(L); return 1; } /* matrix ipairs function */ static int matrix_ipairs(lua_State * L) { luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_next); lua_pushvalue(L, 1); lua_pushinteger(L, 0); return 3; } /* returns the string representation of a matrix */ static int matrix_tostring(lua_State * L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); lua_pushfstring(L, "matrix: %p", X); return 1; } /* frees a matrix */ static int matrix_free(lua_State * L) { struct matrix *X; X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X->ref == LUA_NOREF) { free(X->values); } else { luaL_unref(L, LUA_REGISTRYINDEX, X->ref); } return 0; } /* returns the type of a linear object */ static int type(lua_State * L) { if (luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE) != NULL) { lua_pushliteral(L, "vector"); return 1; } if (luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE) != NULL) { lua_pushliteral(L, "matrix"); return 1; } lua_pushnil(L); return 1; } /* returns the size of a linear object */ static int size(lua_State * L) { struct vector *x; struct matrix *X; x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_pushinteger(L, x->size); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { lua_pushinteger(L, X->rows); lua_pushinteger(L, X->cols); lua_pushstring(L, ORDERS[X->order == CblasRowMajor ? 0 : 1]); return 3; } return argerror(L, 1); } /* transposed vector */ static int tvector(lua_State * L) { struct matrix *X; int index, size; struct vector *x; /* process arguments */ X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); index = luaL_checkinteger(L, 2); luaL_argcheck(L, index >= 1, 2, "bad index"); switch (X->order) { case CblasRowMajor: luaL_argcheck(L, index <= X->cols, 2, "bad index"); size = X->rows; break; case CblasColMajor: luaL_argcheck(L, index <= X->rows, 2, "bad index"); size = X->cols; break; default: /* not reached */ size = -1; assert(0); } /* create vector */ x = wrapvector(L, size, &X->values[index - 1]); x->inc = X->ld; lua_pushvalue(L, 1); x->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } /* subvector or submatrix */ static int sub(lua_State * L) { struct vector *x, *s; struct matrix *X, *S; /* process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { int start, end; start = luaL_optinteger(L, 2, 1); luaL_argcheck(L, start >= 1 && start <= x->size, 2, "bad index"); end = luaL_optinteger(L, 3, x->size); luaL_argcheck(L, end >= start && end <= x->size, 3, "bad index"); s = wrapvector(L, end - start + 1, &x->values[ (size_t) (start - 1) * x->inc]); s->inc = x->inc; lua_pushvalue(L, 1); s->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { int rowstart, rowend, colstart, colend; switch (X->order) { case CblasRowMajor: rowstart = luaL_optinteger(L, 2, 1); luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows, 2, "bad index"); colstart = luaL_optinteger(L, 3, 1); luaL_argcheck(L, colstart >= 1 && colstart <= X->cols, 3, "bad index"); rowend = luaL_optinteger(L, 4, X->rows); luaL_argcheck(L, rowend >= rowstart && rowend <= X->rows, 4, "bad index"); colend = luaL_optinteger(L, 5, X->cols); luaL_argcheck(L, colend >= colstart && colend <= X->cols, 5, "bad index"); S = wrapmatrix(L, rowend - rowstart + 1, colend - colstart + 1, X->order, &X->values[ (size_t) (rowstart - 1) * X->ld + colstart - 1]); break; case CblasColMajor: colstart 
= luaL_optinteger(L, 2, 1); luaL_argcheck(L, colstart >= 1 && colstart <= X->cols, 2, "bad index"); rowstart = luaL_optinteger(L, 3, 1); luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows, 3, "bad index"); colend = luaL_optinteger(L, 4, X->cols); luaL_argcheck(L, colend >= colstart && colend <= X->cols, 4, "bad index"); rowend = luaL_optinteger(L, 5, X->rows); luaL_argcheck(L, rowend >= rowstart && rowend <= X->rows, 5, "bad index"); S = wrapmatrix(L, rowend - rowstart + 1, colend - colstart + 1, X->order, &X->values[ (size_t) (colstart - 1) * X->ld + rowstart - 1]); break; default: /* not reached */ assert(0); return 0; } S->ld = X->ld; lua_pushvalue(L, 1); S->ref = luaL_ref(L, LUA_REGISTRYINDEX); return 1; } return argerror(L, 1); } /* unwinds matrices into a vector */ static int unwind(lua_State * L) { struct vector *x; int index, i, j, k; size_t base; struct matrix *X; if (lua_gettop(L) == 0) { return luaL_error(L, "wrong number of arguments"); } x = luaL_checkudata(L, lua_gettop(L), LUALINEAR_VECTOR_METATABLE); index = 1; i = 0; while (i < x->size) { X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->rows * X->cols <= x->size - i, index, "matrix too large"); switch (X->order) { case CblasRowMajor: for (j = 0; j < X->rows; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->cols; k++) { x->values[(size_t) i * x->inc] = X->values[base + k]; i++; } } break; case CblasColMajor: for (j = 0; j < X->cols; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->rows; k++) { x->values[(size_t) i * x->inc] = X->values[base + k]; i++; } } break; } index++; } return 0; } /* reshapes a vector into matrices */ static int reshape(lua_State * L) { struct vector *x; int index, i, j, k; size_t base; struct matrix *X; x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); index = 2; i = 0; while (i < x->size) { X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->rows * X->cols <= x->size - i, index, "matrix too large"); switch (X->order) { case CblasRowMajor: for (j = 0; j < X->rows; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->cols; k++) { X->values[base + k] = x->values[ (size_t) i * x->inc]; i++; } } break; case CblasColMajor: for (j = 0; j < X->cols; j++) { base = (size_t) j *X->ld; for (k = 0; k < X->rows; k++) { X->values[base + k] = x->values[ (size_t) i * x->inc]; i++; } } break; } index++; } return 0; } /* converts a vector or matrix to a table */ static int totable(lua_State * L) { struct vector *x; struct matrix *X; int i, j; const float *value; /* check and process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_createtable(L, 0, 3); lua_pushliteral(L, "vector"); lua_setfield(L, -2, "type"); lua_pushinteger(L, x->size); lua_setfield(L, -2, "length"); lua_createtable(L, x->size, 0); value = x->values; for (i = 0; i < x->size; i++) { lua_pushnumber(L, *value); lua_rawseti(L, -2, i + 1); value += x->inc; } lua_setfield(L, -2, "values"); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { lua_createtable(L, 0, 5); lua_pushliteral(L, "matrix"); lua_setfield(L, -2, "type"); lua_pushinteger(L, X->rows); lua_setfield(L, -2, "rows"); lua_pushinteger(L, X->cols); lua_setfield(L, -2, "cols"); switch (X->order) { case CblasRowMajor: lua_pushliteral(L, "rowmajor"); lua_setfield(L, -2, "order"); lua_createtable(L, X->rows, 0); for (i = 0; i < X->rows; i++) { lua_createtable(L, X->cols, 0); value = &X->values[(size_t) i * X->ld]; for (j = 0; j < X->cols; j++) { 
lua_pushnumber(L, *value++); lua_rawseti(L, -2, j + 1); } lua_rawseti(L, -2, i + 1); } lua_setfield(L, -2, "values"); break; case CblasColMajor: lua_pushliteral(L, "colmajor"); lua_setfield(L, -2, "order"); lua_createtable(L, X->cols, 0); for (i = 0; i < X->cols; i++) { lua_createtable(L, X->rows, 0); value = &X->values[(size_t) i * X->ld]; for (j = 0; j < X->rows; j++) { lua_pushnumber(L, *value++); lua_rawseti(L, -2, j + 1); } lua_rawseti(L, -2, i + 1); } lua_setfield(L, -2, "values"); break; } return 1; } return argerror(L, 1); } /* converts a table to a vector or matrix */ static int tolinear(lua_State * L) { static const char *types[] = {"vector", "matrix", NULL}; static const char *orders[] = {"rowmajor", "colmajor", NULL}; struct vector *x; struct matrix *X; int size, rows, cols, major, minor; CBLAS_ORDER order; int i, j; int isnum; float *value; /* check arguments */ luaL_checktype(L, 1, LUA_TTABLE); lua_settop(L, 1); /* handle types */ switch (optionvalue(L, "type", NULL, types)) { case 0: /* vector */ size = intvalue(L, "length", -1); if (size < 1) { return luaL_error(L, "bad field " LUA_QS, "length"); } x = newvector(L, size); lua_getfield(L, 1, "values"); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad field " LUA_QS, "values"); } value = x->values; for (i = 0; i < size; i++) { lua_rawgeti(L, -1, i + 1); *value++ = lua_tonumberx(L, -1, &isnum); if (!isnum) { return luaL_error(L, "bad value at index %d", i + 1); } lua_pop(L, 1); } lua_pop(L, 1); return 1; case 1: /* matrix */ rows = intvalue(L, "rows", -1); if (rows < 1) { return luaL_error(L, "bad field " LUA_QS, "rows"); } cols = intvalue(L, "cols", -1); if (cols < 1) { return luaL_error(L, "bad field " LUA_QS, "cols"); } switch (optionvalue(L, "order", NULL, orders)) { case 0: order = CblasRowMajor; major = rows; minor = cols; break; case 1: order = CblasColMajor; major = cols; minor = rows; break; default: /* not reached */ assert(0); return 0; } X = newmatrix(L, rows, cols, order); lua_getfield(L, 1, "values"); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad field " LUA_QS, "values"); } for (i = 0; i < major; i++) { value = &X->values[i * X->ld]; lua_rawgeti(L, -1, i + 1); if (lua_type(L, -1) != LUA_TTABLE) { return luaL_error(L, "bad value at index %d", i + 1); } for (j = 0; j < minor; j++) { lua_rawgeti(L, -1, j + 1); *value++ = lua_tonumberx(L, -1, &isnum); if (!isnum) { return luaL_error(L, "bad value at " "index (%d,%d)", i + 1, j + 1); } lua_pop(L, 1); } lua_pop(L, 1); } lua_pop(L, 1); return 1; } /* not reached */ assert(0); return 0; } /* invokes the DOT subprogram (x' y) */ static int dot(lua_State * L) { struct vector *x, *y; float dot; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch"); /* invoke subprogram */ dot = cblas_sdot(x->size, x->values, x->inc, y->values, y->inc); lua_pushnumber(L, dot); return 1; } /* invokes the NRM2 subprogram (||x||_2) */ static int nrm2(lua_State * L) { struct vector *x; float nrm2; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ nrm2 = cblas_snrm2(x->size, x->values, x->inc); lua_pushnumber(L, nrm2); return 1; } /* invokes the ASUM subprogram (sigma |x|) */ static int asum(lua_State * L) { struct vector *x; float asum; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ asum =
cblas_sasum(x->size, x->values, x->inc); lua_pushnumber(L, asum); return 1; } /* invokes the IAMAX subprogram (argmax |x|) */ static int iamax(lua_State * L) { struct vector *x; int iamax; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); /* invoke subprogram */ iamax = cblas_isamax(x->size, x->values, x->inc); lua_pushinteger(L, iamax + 1); return 1; } /* sum implementation */ static float _sum(const float *values, int size, int inc) { float sum; int i; sum = 0.0; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) reduction(+:sum) for (i = 0; i < size; i++) { sum += values[(size_t) i * inc]; } return sum; } /* sum function (sigma x_i) */ static int sum(lua_State * L) { struct vector *x, *y; struct matrix *X; int i; /* check and process arguments */ x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { lua_pushnumber(L, _sum(x->values, x->size, x->inc)); return 1; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); switch (checktranspose(L, 3)) { case CblasNoTrans: switch (X->order) { case CblasRowMajor: luaL_argcheck(L, y->size == X->rows, 2, "dimension mismatch"); for (i = 0; i < X->rows; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i * X->ld], X->cols, 1); } break; case CblasColMajor: luaL_argcheck(L, y->size == X->cols, 2, "dimension mismatch"); for (i = 0; i < X->cols; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i * X->ld], X->rows, 1); } break; } break; case CblasTrans: switch (X->order) { case CblasRowMajor: luaL_argcheck(L, y->size == X->cols, 2, "dimension mismatch"); for (i = 0; i < X->cols; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i], X->rows, X->ld); } break; case CblasColMajor: luaL_argcheck(L, y->size == X->rows, 2, "dimension mismatch"); for (i = 0; i < X->rows; i++) { y->values[(size_t) i * y->inc] = _sum( &X->values[(size_t) i], X->cols, X->ld); } break; } break; default: /* not reached */ assert(0); break; } return 0; } return argerror(L, 1); } /* xy function */ typedef void (*xyfunction) (int, float *, int, float *, int, float); /* invokes an (x,y) subprogram */ static int xy(lua_State * L, xyfunction s, int hasy, int hasalpha) { int index, i; float alpha; struct vector *x, *y; struct matrix *X, *Y; /* check and process arguments */ index = 2; x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { if (hasy) { y = luaL_testudata(L, 2, LUALINEAR_VECTOR_METATABLE); Y = luaL_testudata(L, 2, LUALINEAR_MATRIX_METATABLE); if (y == NULL && Y == NULL) { return argerror(L, 2); } index++; } else { y = x; Y = NULL; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } if (y != NULL) { /* invoke subprogram on vector-vector */ luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch"); s(x->size, x->values, x->inc, y->values, y->inc, alpha); return 0; } /* invoke subprogram on vector-matrix */ switch (checktranspose(L, index)) { case CblasNoTrans: switch (Y->order) { case CblasRowMajor: luaL_argcheck(L, x->size == Y->cols, 2, "dimension mismatch"); for (i = 0; i < Y->rows; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; case CblasColMajor: luaL_argcheck(L, x->size == Y->rows, 2, "dimension mismatch"); for (i = 0; i < Y->cols; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; } break; case CblasTrans: switch
(Y->order) { case CblasRowMajor: luaL_argcheck(L, x->size == Y->rows, 2, "dimension mismatch"); for (i = 0; i < Y->rows; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i], Y->ld, alpha); } break; case CblasColMajor: luaL_argcheck(L, x->size == Y->cols, 2, "dimension mismatch"); for (i = 0; i < Y->cols; i++) { s(x->size, x->values, x->inc, &Y->values[(size_t) i], Y->ld, alpha); } break; } break; default: /* not reached */ assert(0); } return 0; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { if (hasy) { Y = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, X->order == Y->order, 2, "order mismatch"); luaL_argcheck(L, X->rows == Y->rows && X->cols == Y->cols, 2, "dimension mismatch"); index++; } else { Y = X; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } /* invoke subprogram on matrix-matrix */ switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { s(X->cols, &X->values[(size_t) i * X->ld], 1, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { s(X->rows, &X->values[(size_t) i * X->ld], 1, &Y->values[(size_t) i * Y->ld], 1, alpha); } break; } return 0; } return argerror(L, 1); } /* wraps the SWAP subprogram */ static void _swap(int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_sswap(size, x, incx, y, incy); } /* invokes the SWAP subprogram (y <-> x) */ static int swap(lua_State * L) { return xy(L, _swap, 1, 0); } /* wraps the COPY subprogram */ static void _copy(int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_scopy(size, x, incx, y, incy); } /* invokes the COPY subprogram (y <- x) */ static int copy(lua_State * L) { return xy(L, _copy, 1, 0); } /* wraps the AXPY subprogram */ static void _axpy(int size, float *x, int incx, float *y, int incy, float alpha) { cblas_saxpy(size, alpha, x, incx, y, incy); } /* invokes the AXPY subprogram (y <- alpha x + y) */ static int axpy(lua_State * L) { return xy(L, _axpy, 1, 1); } /* wraps the SCAL subprogram */ static void _scal(int size, float *x, int incx, float *y, int incy, float alpha) { (void)y; (void)incy; cblas_sscal(size, alpha, x, incx); } /* invokes the SCAL subprogram (x <- alpha x) */ static int scal(lua_State * L) { return xy(L, _scal, 0, 1); } /* set operation implementation */ static void _set(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t) i * incx] = alpha; } } /* performs a set operation (x <- alpha) */ static int set(lua_State * L) { return xy(L, _set, 0, 1); } /* uniform RNG implementation */ static void _uniform(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; (void)alpha; for (i = 0; i < size; i++) { *x = (float)random() * (1.0 / ((float)RAND_MAX + 1.0)); x += incx; } } /* performs a uniform operation (x <- uniform) */ static int uniform(lua_State * L) { return xy(L, _uniform, 0, 0); } /* normal RNG implementation */ static void _normal(int size, float *x, int incx, float *y, int incy, float alpha) { int i; float u1, u2, r, s, c; (void)y; (void)incy; (void)alpha; for (i = 0; i < size - 1; i += 2) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= 0.0f); /* reject u1 == 0 so logf(u1) stays finite */ r = sqrtf(-2.0 * logf(u1)); sincosf(2 * M_PI * u2, &s, &c); *x = r *
c; x += incx; *x = r * s; x += incx; } if (i < size) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= 0.0f); /* reject u1 == 0 so logf(u1) stays finite */ *x = sqrtf(-2.0 * logf(u1)) * cosf(2 * M_PI * u2); x += incx; } } /* performs a normal operation (x <- normal) */ static int normal(lua_State * L) { return xy(L, _normal, 0, 0); } /* inc operation implementation */ static void _inc(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t) i * incx] += alpha; } } /* performs an inc operation (x <- x + alpha) */ static int inc(lua_State * L) { return xy(L, _inc, 0, 1); } /* element-wise multiplication implementation, alpha = 1 */ static void _mul1(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t) i * incy] *= x[(size_t) i * incx]; } } /* element-wise multiplication implementation, alpha = -1 */ static void _mulm1(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t) i * incy] /= x[(size_t) i * incx]; } } /* element-wise multiplication implementation, alpha = any */ static void _mul(int size, float *x, int incx, float *y, int incy, float alpha) { int i; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t) i * incy] *= pow(x[(size_t) i * incx], alpha); } } /* performs element-wise multiplication (y <- x^alpha .* y) */ static int mul(lua_State * L) { float alpha; alpha = luaL_optnumber(L, 3, 1.0); if (alpha == 1.0) { return xy(L, _mul1, 1, 1); } if (alpha == -1.0) { return xy(L, _mulm1, 1, 1); } return xy(L, _mul, 1, 1); } /* power raising operation implementation */ static void _pow(int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t) i * incx] = pow(x[(size_t) i * incx], alpha); } } /* performs element-wise power raising (x <- x^alpha) */ static int powx(lua_State * L) { return xy(L, _pow, 0, 1); } /* apply function */ typedef float (*applyfunction) (float); /* applies a function to a value */ static int apply(lua_State * L, applyfunction apply, int parallel) { struct vector *x; struct matrix *X; int i, j; size_t base; if (lua_type(L, 1) == LUA_TNUMBER) { lua_pushnumber(L, apply(lua_tonumber(L, 1))); return 1; } x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE); if (x != NULL) { #pragma omp parallel for private(i) schedule(auto) \ if(parallel && x->size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < x->size; i++) { x->values[(size_t) i * x->inc] = apply(x->values[(size_t) i * x->inc]); } return 0; } X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE); if (X != NULL) { switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { base = (size_t) i *X->ld; #pragma omp parallel for private(j) \ schedule(auto) \ if(parallel && X->cols \ >= LUALINEAR_OMP_MINSIZE) for (j = 0; j < X->cols; j++) { X->values[base + j] = apply( X->values[base + j]); } } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { base = (size_t) i *X->ld; #pragma omp parallel
for private(j) \ schedule(auto) \ if(parallel && X->rows \ >= LUALINEAR_OMP_MINSIZE) for (j = 0; j < X->rows; j++) { X->values[base + j] = apply( X->values[base + j]); } } break; } return 0; } return luaL_argerror(L, 1, lua_pushfstring(L, "number, vector, or " "matrix expected, got %s", luaL_typename(L, 1))); } /* sign function implementation */ static float _sign(float x) { if (x > 0) { return 1; } if (x < 0) { return -1; } return x; } /* sign function */ static int sign(lua_State * L) { return apply(L, _sign, 1); } /* abs function implementation */ static float _abs(float x) { return fabs(x); } /* abs function */ static int absx(lua_State * L) { return apply(L, _abs, 1); } /* exp function */ static int expx(lua_State * L) { return apply(L, expf, 1); } /* log function */ static int logx(lua_State * L) { return apply(L, logf, 1); } /* logistic function implementation */ static float _logistic(float z) { return 1.0 / (1.0 + expf(-z)); } /* logistic function */ static int logistic(lua_State * L) { return apply(L, _logistic, 1); } /* tanh function */ static int tanhx(lua_State * L) { return apply(L, tanhf, 1); } /* softplus function implementation */ static float _softplus(float x) { return logf(1 + expf(x)); } /* softplus function */ static int softplus(lua_State * L) { return apply(L, _softplus, 1); } /* rectifier function implementation */ static float _rectifier(float x) { return x > 0.0 ? x : 0.0; } /* rectifier function */ static int rectifier(lua_State * L) { return apply(L, _rectifier, 1); } /* current Lua state */ static __thread lua_State *TL; /* apply function implementation */ static float _apply(float x) { float result; lua_pushvalue(TL, -1); lua_pushnumber(TL, x); lua_call(TL, 1, 1); result = lua_tonumber(TL, -1); lua_pop(TL, 1); return result; } /* apply function */ static int applyx(lua_State * L) { luaL_checktype(L, 2, LUA_TFUNCTION); lua_settop(L, 2); TL = L; return apply(L, _apply, 0); } /* invokes the GEMV subprogram (y <- alpha A x + b y) */ static int gemv(lua_State * L) { struct matrix *A; struct vector *x, *y; float alpha, beta; CBLAS_TRANSPOSE ta; int m, n; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); x = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 3, LUALINEAR_VECTOR_METATABLE); alpha = luaL_optnumber(L, 4, 1.0); beta = luaL_optnumber(L, 5, 0.0); ta = checktranspose(L, 6); m = ta == CblasNoTrans ? A->rows : A->cols; n = ta == CblasNoTrans ? 
A->cols : A->rows; luaL_argcheck(L, x->size == n, 2, "dimension mismatch"); luaL_argcheck(L, y->size == m, 3, "dimension mismatch"); /* invoke subprogram */ cblas_sgemv(A->order, ta, A->rows, A->cols, alpha, A->values, A->ld, x->values, x->inc, beta, y->values, y->inc); return 0; } /* invokes the GER subprogram (A <- alpha x y' + A) */ static int ger(lua_State * L) { struct vector *x, *y; struct matrix *A; float alpha; /* check and process arguments */ x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE); y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE); A = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE); alpha = luaL_optnumber(L, 4, 1.0); luaL_argcheck(L, x->size == A->rows, 1, "dimension mismatch"); luaL_argcheck(L, y->size == A->cols, 2, "dimension mismatch"); /* invoke subprogram */ cblas_sger(A->order, A->rows, A->cols, alpha, x->values, x->inc, y->values, y->inc, A->values, A->ld); return 0; } /* invokes the GEMM subprogram (C <- alpha A B + beta C) */ static int gemm(lua_State * L) { struct matrix *A, *B, *C; float alpha, beta; CBLAS_TRANSPOSE ta, tb; int m, n, ka, kb; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); C = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, C->order == A->order, 3, "order mismatch"); alpha = luaL_optnumber(L, 4, 1.0); beta = luaL_optnumber(L, 5, 0.0); ta = checktranspose(L, 6); tb = checktranspose(L, 7); m = ta == CblasNoTrans ? A->rows : A->cols; n = tb == CblasNoTrans ? B->cols : B->rows; ka = ta == CblasNoTrans ? A->cols : A->rows; kb = tb == CblasNoTrans ? B->rows : B->cols; luaL_argcheck(L, ka == kb, 2, "dimension mismatch"); /* invoke subprogram */ cblas_sgemm(A->order, ta, tb, m, n, ka, alpha, A->values, A->ld, B->values, B->ld, beta, C->values, C->ld); return 0; } /* invokes the GESV subprogram */ static int gesv(lua_State * L) { struct matrix *A, *B; int *ipiv, result; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); luaL_argcheck(L, B->rows == A->rows, 2, "dimension mismatch"); /* invoke subprogram */ ipiv = calloc(A->rows, sizeof(lapack_int)); if (ipiv == NULL) { return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgesv(A->order, A->rows, B->cols, A->values, A->ld, ipiv, B->values, B->ld); free(ipiv); lua_pushinteger(L, result); return 1; } /* invokes the GELS subprogram */ static int gels(lua_State * L) { struct matrix *A, *B; char ta; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, B->order == A->order, 2, "order mismatch"); ta = lapacktranspose(checktranspose(L, 3)); luaL_argcheck(L, B->rows == (A->rows >= A->cols ?
A->rows : A->cols), 2, "dimension mismatch"); /* invoke subprogram */ lua_pushinteger(L, LAPACKE_sgels(A->order, ta, A->rows, A->cols, B->cols, A->values, A->ld, B->values, B->ld)); return 1; } /* calculates the inverse of a matrix */ static int inv(lua_State * L) { struct matrix *A; int *ipiv, result; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); /* invoke subprograms */ ipiv = calloc(A->rows, sizeof(lapack_int)); if (ipiv == NULL) { return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgetrf(A->order, A->rows, A->cols, A->values, A->ld, ipiv); if (result != 0) { free(ipiv); lua_pushinteger(L, result); return 1; } result = LAPACKE_sgetri(A->order, A->rows, A->values, A->ld, ipiv); free(ipiv); lua_pushinteger(L, result); return 1; } /* calculates the determinant of a matrix */ static int det(lua_State * L) { struct matrix *A; float *copy, *d, *s, det; int n, *ipiv, result, neg, i; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->rows == A->cols, 1, "not square"); n = A->rows; /* copy matrix */ copy = calloc((size_t) n * n, sizeof(float)); if (copy == NULL) { return luaL_error(L, "cannot allocate values"); } d = copy; s = A->values; for (i = 0; i < n; i++) { memcpy(d, s, (size_t) n * sizeof(float)); d += n; s += A->ld; } /* invoke subprograms */ ipiv = calloc(n, sizeof(lapack_int)); if (ipiv == NULL) { free(copy); return luaL_error(L, "cannot allocate indexes"); } result = LAPACKE_sgetrf(A->order, n, n, copy, n, ipiv); if (result != 0) { free(copy); free(ipiv); lua_pushnumber(L, 0.0); return 1; } /* calculate determinant */ det = 1.0; neg = 0; for (i = 0; i < n; i++) { det *= copy[(size_t) i * n + i]; if (ipiv[i] != i + 1) { neg = !neg; } } free(copy); free(ipiv); lua_pushnumber(L, neg ?
-det : det); return 1; } /* calculates the covariance of a matrix */ static int cov(lua_State * L) { struct matrix *A, *B; int ddof, i, j, k; float *means, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); ddof = luaL_optinteger(L, 3, 0); luaL_argcheck(L, ddof >= 0 && ddof < A->rows, 3, "bad ddof"); /* calculate means */ means = calloc((size_t) A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; } break; case CblasColMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t) i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; } break; } /* calculate covariance */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (A->rows - ddof); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t) i * A->ld]; vj = &A->values[(size_t) j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (A->rows - ddof); } } break; } free(means); return 0; } /* calculates the correlation of a matrix */ static int corr(lua_State * L) { struct matrix *A, *B; int i, j, k; float *means, *stds, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); /* calculate means and stds */ means = calloc((size_t) A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } stds = calloc((size_t) A->cols, sizeof(float)); if (stds == NULL) { free(means); return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v += A->ld; } stds[i] = sqrt(sum); } break; case CblasColMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * 
A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t) i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[(size_t) i * A->ld]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v++; } stds[i] = sqrt(sum); } break; } /* calculate correlation */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t) i * A->ld]; vj = &A->values[(size_t) j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t) i * B->ld + j] = B->values[ (size_t) j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; } free(means); free(stds); return 0; } /* * Exported functions. */ int luaopen_linear(lua_State * L) { static const luaL_Reg FUNCTIONS[] = { {"vector", vector}, {"matrix", matrix}, {"type", type}, {"size", size}, {"tvector", tvector}, {"sub", sub}, {"unwind", unwind}, {"reshape", reshape}, {"totable", totable}, {"tolinear", tolinear}, {"dot", dot}, {"nrm2", nrm2}, {"asum", asum}, {"iamax", iamax}, {"sum", sum}, {"swap", swap}, {"copy", copy}, {"axpy", axpy}, {"scal", scal}, {"set", set}, {"uniform", uniform}, {"normal", normal}, {"inc", inc}, {"mul", mul}, {"pow", powx}, {"sign", sign}, {"abs", absx}, {"exp", expx}, {"log", logx}, {"logistic", logistic}, {"tanh", tanhx}, {"softplus", softplus}, {"rectifier", rectifier}, {"apply", applyx}, {"gemv", gemv}, {"ger", ger}, {"gemm", gemm}, {"gesv", gesv}, {"gels", gels}, {"inv", inv}, {"det", det}, {"cov", cov}, {"corr", corr}, {NULL, NULL} }; /* register functions */ #if LUA_VERSION_NUM >= 502 luaL_newlib(L, FUNCTIONS); #else luaL_register(L, luaL_checkstring(L, 1), FUNCTIONS); #endif /* vector metatable */ luaL_newmetatable(L, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, vector_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, vector_newindex); lua_setfield(L, -2, "__newindex"); lua_pushcfunction(L, vector_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, vector_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, vector_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); /* matrix metatable */ luaL_newmetatable(L, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, matrix_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, matrix_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, matrix_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, matrix_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); return 1; }
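Aside on the gemm binding above: it derives m, n, and k from the transpose flags and hands everything to cblas_sgemm. As a minimal standalone sketch (hypothetical file, not part of the module; assumes a CBLAS installation, and linker flags vary by platform), the same call for a plain row-major product looks like this:

#include <stdio.h>
#include <cblas.h>

int main(void)
{
	/* A is 2x3, B is 3x2, C is 2x2; with no transposition the binding's
	   checks reduce to m = rows(A), n = cols(B), k = cols(A) == rows(B) */
	float A[] = {1, 2, 3, 4, 5, 6};
	float B[] = {7, 8, 9, 10, 11, 12};
	float C[4] = {0};
	cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
	    2, 2, 3,     /* m, n, k */
	    1.0f, A, 3,  /* alpha, A, lda = cols(A) in row-major */
	    B, 2,        /* B, ldb = cols(B) */
	    0.0f, C, 2); /* beta, C, ldc = cols(C) */
	printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]); /* 58 64 / 139 154 */
	return 0;
}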
omp2-2.c
#include<math.h> #include<stdio.h> #define N 1000000 int main() { int i; double x, area = 0; //#pragma omp parallel for private(x) for (i = 0; i < N; i++) { x = (i + .5) / N; area += 4 / (1 + x*x); } printf("%.10lf\n", area/N); return 0; }
#include<math.h> #include<stdio.h> #define N 1000000 int main() { int i; double x, area = 0; // for (i = 0; i < N; i++) { x = (i + .5) / N; area += 4 / (1 + x * x); } printf("%.10lf\n", area / N); return 0; }
#include<math.h> #include<stdio.h> #define N 1000000 int main() { int i; double x, area = 0; // #pragma omp parallel for private(x) for (i = 0; i < N; i++) { x = (i + .5) / N; area += 4 / (1 + x * x); } printf("%.10lf\n", area / N); return 0; }
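Note on the omp2-2.c variants above: the pragma is commented out in all three columns, and enabling it as written (private(x) only) would leave a data race on the shared accumulator area. A minimal corrected sketch (my addition, not one of the dataset columns) uses a reduction clause instead:

#include <stdio.h>
#define N 1000000

int main()
{
	int i;
	double area = 0;
	/* reduction(+:area) gives each thread a private partial sum and
	   combines them at the end; declaring x inside the loop body
	   replaces the private(x) clause */
#pragma omp parallel for reduction(+:area)
	for (i = 0; i < N; i++) {
		double x = (i + .5) / N;
		area += 4 / (1 + x * x);
	}
	printf("%.10lf\n", area / N); /* converges to pi */
	return 0;
}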
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> // HLSL Change Starts #include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant namespace hlsl { struct UnusualAnnotation; } // HLSL Change Ends namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; 
class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. 
Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; // HLSL Change Begin // The HLSL rewriter doesn't define a default matrix pack, // so we must preserve the lack of annotations to avoid changing semantics. bool HasDefaultMatrixPack = false; // Uses of #pragma pack_matrix change the default pack. bool DefaultMatrixPackRowMajor = false; // HLSL Change End. enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. 
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. 
ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. 
ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. 
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
/// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedButUsed - all the used, undefined objects which require a /// definition in this translation unit. llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed; /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; void ReadMethodPool(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema& S) : S(S), OldFPContractState(S.FPFeatures.fp_contract) {} ~FPContractStateRAII() { S.FPFeatures.fp_contract = OldFPContractState; } private: Sema& S; bool OldFPContractState : 1; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything.
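///
/// A typical embedding constructs the object and calls this once before
/// parsing starts (a sketch; \c PP, \c Ctx, and \c C are an assumed
/// Preprocessor, ASTContext, and ASTConsumer set up elsewhere):
/// \code
///   Sema S(PP, Ctx, C);
///   S.Initialize();
/// \endcode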
void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. 
ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); unsigned deduceWeakPropertyFromType(QualType T) { if ((getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak()) || (getLangOpts().ObjCAutoRefCount && T.getObjCLifetime() == Qualifiers::OCL_Weak)) return ObjCDeclSpec::DQ_PR_weak; return 0; } /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. 
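///
/// For instance, rebuilding a 'void(int)' prototype might look like this
/// (a sketch; \c S, \c Ctx, and \c Loc are an assumed Sema, ASTContext,
/// and SourceLocation from the surrounding code):
/// \code
///   FunctionProtoType::ExtProtoInfo EPI;
///   SmallVector<QualType, 1> Params;
///   Params.push_back(Ctx.IntTy);
///   QualType FnTy = S.BuildFunctionType(Ctx.VoidTy, Params, Loc,
///                                       DeclarationName(), EPI);
/// \endcode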
/// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
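///
/// A minimal subclass sketch (illustrative only; the diagnostic ID is a
/// placeholder, not necessarily the one real callers use):
/// \code
///   struct IncompleteTypeDiagnoser : Sema::TypeDiagnoser {
///     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
///       S.Diag(Loc, diag::err_incomplete_type) << T;
///     }
///   };
/// \endcode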
struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
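///
/// Callers typically switch over the kind of the returned classification
/// (a sketch; \c Result is an assumed NameClassification value):
/// \code
///   switch (Result.getKind()) {
///   case Sema::NC_Type:       /* use Result.getType() */       break;
///   case Sema::NC_Expression: /* use Result.getExpression() */ break;
///   default:                                                   break;
///   }
/// \endcode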
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *) : Kind(NC_Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. 
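///
/// A parser-side invocation might look like this (a sketch; the arguments
/// come from the parser's current state, and \c Actions is the parser's
/// Sema reference):
/// \code
///   Sema::NameClassification Classification =
///       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
///                            /*IsAddressOfOperand=*/false);
/// \endcode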
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); // HLSL Change Starts // This enumeration is used to determine whether a variable declaration // should shadow a prior declaration rather than merging. 
enum ShadowMergeState { ShadowMergeState_Disallowed, // shadowing is not allowed ShadowMergeState_Possible, // shadowing is possible (but may not occur) ShadowMergeState_Effective // the declaration should shadow a prior one }; // HLSL Change Ends NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, 
Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
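///
/// A sketch of the intended use (assuming a Scope *S and the first
/// enumerator's IdentifierInfo and location from the parser):
/// \code
///   Sema::SkipBodyInfo Skip = SemaRef.shouldSkipAnonEnumBody(S, II, IILoc);
///   if (Skip.ShouldSkip) {
///     // Reuse Skip.Previous rather than parsing the body again.
///   }
/// \endcode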
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
// TODO: make this a typesafe union. typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all templates and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the
enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
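///
/// For example, checking a definition's parameters might look like this
/// (a sketch; \c FD is an assumed FunctionDecl*):
/// \code
///   S.CheckParmsForFunctionDef(FD->param_begin(), FD->param_end(),
///                              /*CheckParameterNames=*/true);
/// \endcode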
  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
                                ParmVarDecl *const *ParamEnd,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria is specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// @brief Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,
    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,
    /// Label name lookup.
    LookupLabel,
    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,
    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,
    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,
    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,
    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations. This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,
    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,
    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,
    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,
    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,
    /// \brief Look up any declaration with any name.
    LookupAnyName
  };

  /// \brief Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs. a reference).
  enum RedeclarationKind {
    /// \brief The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,
    /// \brief The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists.
    ForRedeclaration
  };

  /// \brief The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// \brief The lookup resulted in an error.
    LOLR_Error,
    /// \brief The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// \brief The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// \brief The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to be
    /// passed as a non-type template argument pack.
    LOLR_Template,
    /// \brief The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplate
  };

  SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
                                                   CXXSpecialMember SM,
                                                   bool ConstArg,
                                                   bool VolatileArg,
                                                   bool RValueThis,
                                                   bool ConstThis,
                                                   bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) LLVM_NOEXCEPT;
    TypoExprState &operator=(TypoExprState &&other) LLVM_NOEXCEPT;
  };

  /// \brief The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// \brief Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  /// \brief The set of known/encountered (unique, canonicalized)
  /// NamespaceDecls.
  ///
  /// The boolean value will be true to indicate that the namespace was loaded
  /// from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl *, bool> KnownNamespaces;

  /// \brief Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;
  /// \brief Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// \brief Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc, LookupNameKind NameKind,
                              RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);
  void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
                                            DeclAccessPair Operator,
                                            QualType T1, QualType T2);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                    ArrayRef<QualType> ArgTys,
                                                    bool AllowRaw,
                                                    bool AllowTemplate,
                                                    bool AllowStringTemplate);
  bool isKnownName(StringRef name);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };
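  // Illustrative sketch (not from the original header): performing an
  // ordinary unqualified lookup with the entry points above and reacting to
  // the possible outcomes. `Name`, `Loc`, and `CurScope` are hypothetical
  // locals.
  //
  // \code
  //   LookupResult R(S, Name, Loc, Sema::LookupOrdinaryName);
  //   if (S.LookupName(R, CurScope)) {
  //     if (R.isAmbiguous())
  //       ; // the ambiguity has already been diagnosed
  //     else if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
  //       ; // exactly one declaration found; use ND
  //   }
  // \endcode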
  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind,
                             Scope *S, CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               std::unique_ptr<CorrectionCandidateCallback> CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// \brief Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult
  CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; });

  ExprResult
  CorrectDelayedTyposInExpr(Expr *E,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                          ArrayRef<Expr *> Args,
                                          AssociatedNamespaceSet &AssociatedNamespaces,
                                          AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}
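  // Illustrative sketch (not from the original header): attempting typo
  // correction after a failed lookup and emitting the usual "did you mean"
  // note via diagnoseTypo. The diagnostic ID is only an example of the
  // common pattern, not a prescribed choice; `R` and `CurScope` are
  // hypothetical locals.
  //
  // \code
  //   if (TypoCorrection Corrected = S.CorrectTypo(
  //           R.getLookupNameInfo(), R.getLookupKind(), CurScope,
  //           /*SS=*/nullptr, /*CCC=*/nullptr, Sema::CTK_ErrorRecovery))
  //     S.diagnoseTypo(Corrected,
  //                    S.PDiag(diag::err_undeclared_var_use_suggest)
  //                        << R.getLookupName());
  // \endcode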
  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                                 bool ForRedeclaration, SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const AttributeList *AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
  bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckNoReturnAttr(const AttributeList &attr);
  bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
                                      unsigned ArgNum, StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType &T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type.
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param nullabilityLoc The location of the nullability specifier.
  ///
  /// \param isContextSensitive Whether this nullability specifier was
  /// written as a context-sensitive keyword (in an Objective-C
  /// method) or an Objective-C property attribute, rather than as an
  /// underscored type specifier.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkNullabilityTypeSpecifier(QualType &type,
                                     NullabilityKind nullability,
                                     SourceLocation nullabilityLoc,
                                     bool isContextSensitive);

  /// \brief Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);
  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
  typedef llvm::DenseMap<Selector, ObjCMethodDecl *> ProtocolsMethodsMap;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCContainerDecl *IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl);
  void DefaultSynthesizeProperties(Scope *S, Decl *D);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property
  /// setter/getter and its property has a backing ivar, returns this ivar;
  /// otherwise, returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(
      Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
      FieldDeclarator &FD, Selector GetterSel, Selector SetterSel,
      const bool isAssign, const bool isReadWrite, const unsigned Attributes,
      const unsigned AttributesAsWritten, bool *isOverridingProperty,
      QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(
      Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc,
      SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel,
      Selector SetterSel, const bool isAssign, const bool isReadWrite,
      const unsigned Attributes, const unsigned AttributesAsWritten,
      QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind,
      DeclContext *lexicalDC = nullptr);
  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
                                       ObjCContainerDecl *IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl *IMPDecl,
                                  ObjCContainerDecl *IDecl,
                                  bool &IncompleteImpl, bool ImmediateClass,
                                  bool WarnCategoryMethodImpl = false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class, warning each
  /// time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// \brief Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// \brief - Returns instance or factory methods in the global method pool
  /// for the given selector. If no such method or only one method is found,
  /// the function returns false; otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                          SmallVectorImpl<ObjCMethodDecl *> &Methods,
                                          bool instance);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass);

  void DiagnoseMultipleMethodInGlobalPool(
      SmallVectorImpl<ObjCMethodDecl *> &Methods, Selector Sel, SourceRange R,
      bool receiverIdOrClass);

private:
  /// \brief - Returns the method that best matches the given argument list,
  /// or nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance);

  /// \brief Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/ true);
  }
  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/ false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass = false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/ true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass = false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/ false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                   QualType ObjectType = QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl *> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg(Sema &actions) : E(nullptr) {}

    ExprResult release() { return E; }

    Expr *get() const { return E; }

    Expr *operator->() { return E; }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg);
  StmtResult ActOnExprStmtError();
  StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// \brief A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S) : S(S) { S.ActOnStartOfCompoundStmt(); }

    ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

  private:
    Sema &S;
  };
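  // Illustrative sketch (not from the original header): a parser typically
  // brackets a compound statement with a CompoundScopeRAII and wraps
  // condition expressions via MakeFullExpr before handing them to the ActOn*
  // callbacks. `LBraceLoc`, `RBraceLoc`, `Stmts`, `CondExpr`, and `IfLoc` are
  // hypothetical locals.
  //
  // \code
  //   Sema::CompoundScopeRAII CompoundScope(S);
  //   StmtResult Body = S.ActOnCompoundStmt(LBraceLoc, RBraceLoc, Stmts,
  //                                         /*isStmtExpr=*/false);
  //   Sema::FullExprArg Cond = S.MakeFullExpr(CondExpr, IfLoc);
  // \endcode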
  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                           SourceLocation DotDotDotLoc, Expr *RHSVal,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc, Stmt *SubStmt,
                              Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal,
                         Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc,
                         Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
                                    Decl *CondVar);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                   Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
                            Decl *CondVar, Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                          Stmt *First, FullExprArg Second, Decl *SecondVar,
                          FullExprArg Third, SourceLocation RParenLoc,
                          Stmt *Body);

  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First,
                                        Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
    BFRK_Check
  };
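  // For orientation (not from the original header): a C++11 range-based for
  // such as `for (auto x : range) body;` is specified to expand roughly as
  //
  // \code
  //   {
  //     auto &&__range = range;
  //     for (auto __begin = begin-expr, __end = end-expr;
  //          __begin != __end; ++__begin) {
  //       auto x = *__begin;
  //       body;
  //     }
  //   }
  // \endcode
  //
  // The Build/Finish entry points below materialize the pieces of this
  // expansion (range declaration, begin/end declarations, condition,
  // increment, and loop variable).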
  StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
                                  SourceLocation ColonLoc, Expr *Collection,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                  SourceLocation ColonLoc, Stmt *RangeDecl,
                                  Stmt *BeginEndDecl, Expr *Cond, Expr *Inc,
                                  Stmt *LoopVarDecl, SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

  StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
                           LabelDecl *TheDecl);
  StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                   SourceLocation StarLoc, Expr *DestExp);
  StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
  StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind, unsigned NumParams);
  typedef std::pair<StringRef, QualType> CapturedParamNameType;
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind,
                                ArrayRef<CapturedParamNameType> Params);
  StmtResult ActOnCapturedRegionEnd(Stmt *S);
  void ActOnCapturedRegionError();
  RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                           SourceLocation Loc,
                                           unsigned NumParams);
  VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                   bool AllowFunctionParameters);
  bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                              bool AllowFunctionParameters);

  StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             Scope *CurScope);
  StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
  StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
                                     Expr *RetValExp);

  StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                             bool IsVolatile, unsigned NumOutputs,
                             unsigned NumInputs, IdentifierInfo **Names,
                             MultiExprArg Constraints, MultiExprArg Exprs,
                             Expr *AsmString, MultiExprArg Clobbers,
                             SourceLocation RParenLoc);

  ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       UnqualifiedId &Id,
                                       llvm::InlineAsmIdentifierInfo &Info,
                                       bool IsUnevaluatedContext);
  bool LookupInlineAsmField(StringRef Base, StringRef Member,
                            unsigned &Offset, SourceLocation AsmLoc);
  StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                            ArrayRef<Token> AsmToks, StringRef AsmString,
                            unsigned NumOutputs, unsigned NumInputs,
                            ArrayRef<StringRef> Constraints,
                            ArrayRef<StringRef> Clobbers,
                            ArrayRef<Expr *> Exprs, SourceLocation EndLoc);
  LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                   SourceLocation Location, bool AlwaysCreate);

  VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo,
                                  QualType ExceptionType,
                                  SourceLocation StartLoc,
                                  SourceLocation IdLoc, IdentifierInfo *Id,
                                  bool Invalid = false);

  Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

  StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
                                  Decl *Parm, Stmt *Body);

  StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

  StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                                MultiStmtArg Catch, Stmt *Finally);

  StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
  StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                  Scope *CurScope);
  ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                            Expr *operand);
  StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr,
                                         Stmt *SynchBody);

  StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
  VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                     SourceLocation StartLoc,
                                     SourceLocation IdLoc,
                                     IdentifierInfo *Id);

  Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

  StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
                                Stmt *HandlerBlock);
  StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                              ArrayRef<Stmt *> Handlers);

  StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                              SourceLocation TryLoc, Stmt *TryBlock,
                              Stmt *Handler);
  StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr,
                                 Stmt *Block);
  void ActOnStartSEHFinallyBlock();
  void ActOnAbortSEHFinallyBlock();
  StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
  StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

  void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

  bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

  /// \brief If it's a file scoped decl that must warn if not used, keep track
  /// of it.
  void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

  /// DiagnoseUnusedExprResult - If the statement passed in is an expression
  /// whose result is unused, warn.
  void DiagnoseUnusedExprResult(const Stmt *S);
  void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
  void DiagnoseUnusedDecl(const NamedDecl *ND);

  /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
  /// statement as a \p Body, and it is located on the same line.
  ///
  /// This helps prevent bugs due to typos, such as:
  ///   if (condition);
  ///     do_stuff();
  void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body,
                             unsigned DiagID);

  /// Warn if a for/while loop statement \p S, which is followed by
  /// \p PossibleBody, has a suspicious null statement as a body.
  void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody);

  /// Warn if a value is moved to itself.
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);

  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };

  void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D,
                               StringRef Message, SourceLocation Loc,
                               const ObjCInterfaceDecl *UnknownObjCClass,
                               const ObjCPropertyDecl *ObjCProperty,
                               bool ObjCPropertyAccess);

  bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message);
  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.

  bool CanUseDecl(NamedDecl *D);
  bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
                         const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                         bool ObjCPropertyAccess = false);
  void NoteDeletedFunction(FunctionDecl *FD);
  std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       Decl *LambdaContextDecl = nullptr,
                                       bool IsDecltype = false);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       ReuseLambdaContextDecl_t,
                                       bool IsDecltype = false);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);

  // Functions for marking a declaration referenced. These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense). There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool OdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E);
  void MarkMemberReferenced(MemberExpr *E);

  void UpdateMarkingForLValueToRValue(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit,
    TryCapture_ExplicitByVal,
    TryCapture_ExplicitByRef
  };
  /// \brief Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks
  /// whether the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind, SourceLocation EllipsisLoc,
                          bool BuildAndDiagnose, QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// \brief Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// \brief Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// \brief Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// \brief Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);

  /// \brief Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);

  /// \brief Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);
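  // Illustrative sketch (not from the original header): checking whether a
  // variable could be captured without actually performing the capture, by
  // passing BuildAndDiagnose = false as documented above. `Var` and `Loc`
  // are hypothetical locals.
  //
  // \code
  //   QualType CaptureType, DeclRefType;
  //   bool Invalid = S.tryCaptureVariable(
  //       Var, Loc, Sema::TryCapture_Implicit,
  //       /*EllipsisLoc=*/SourceLocation(), /*BuildAndDiagnose=*/false,
  //       CaptureType, DeclRefType,
  //       /*FunctionScopeIndexToStopAt=*/nullptr);
  //   // A false return means the capture would succeed; CaptureType and
  //   // DeclRefType are then valid per the documentation above.
  // \endcode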
  // Primary Expressions.
  SourceRange getExprRange(Expr *E) const;

  ExprResult ActOnIdExpression(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
      std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
      bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

  void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                              TemplateArgumentListInfo &Buffer,
                              DeclarationNameInfo &NameInfo,
                              const TemplateArgumentListInfo *&TemplateArgs);

  bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                           std::unique_ptr<CorrectionCandidateCallback> CCC,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           ArrayRef<Expr *> Args = None,
                           TypoExpr **Out = nullptr);

  ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                                IdentifierInfo *II,
                                bool AllowBuiltinCreation = false);

  ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        bool isAddressOfOperand,
                                        const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                              SourceLocation Loc,
                              const CXXScopeSpec *SS = nullptr);
  ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                              const DeclarationNameInfo &NameInfo,
                              const CXXScopeSpec *SS = nullptr,
                              NamedDecl *FoundD = nullptr,
                              const TemplateArgumentListInfo *TemplateArgs = nullptr);
  ExprResult BuildAnonymousStructUnionMemberReference(
      const CXXScopeSpec &SS, SourceLocation nameLoc,
      IndirectFieldDecl *indirectField,
      DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
      Expr *baseObjectExpr = nullptr,
      SourceLocation opLoc = SourceLocation());

  ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
                                             SourceLocation TemplateKWLoc,
                                             LookupResult &R,
                                             const TemplateArgumentListInfo *TemplateArgs);
  ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     LookupResult &R,
                                     const TemplateArgumentListInfo *TemplateArgs,
                                     bool IsDefiniteInstance);
  bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                  const LookupResult &R,
                                  bool HasTrailingLParen);

  ExprResult
  BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                    const DeclarationNameInfo &NameInfo,
                                    bool IsAddressOfOperand,
                                    TypeSourceInfo **RecoveryTSI = nullptr);

  ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       const DeclarationNameInfo &NameInfo,
                                       const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
                                      bool NeedsADL,
                                      bool AcceptInvalidDecl = false);
  ExprResult BuildDeclarationNameExpr(
      const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
      NamedDecl *D, NamedDecl *FoundD = nullptr,
      const TemplateArgumentListInfo *TemplateArgs = nullptr,
      bool AcceptInvalidDecl = false);

  ExprResult BuildLiteralOperatorCall(LookupResult &R,
                                      DeclarationNameInfo &SuffixInfo,
                                      ArrayRef<Expr *> Args,
                                      SourceLocation LitEndLoc,
                                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

  ExprResult BuildPredefinedExpr(SourceLocation Loc,
                                 PredefinedExpr::IdentType IT);
  ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
  ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

  bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

  ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
  ExprResult ActOnCharacterConstant(const Token &Tok,
                                    Scope *UDLScope = nullptr);
  ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
  ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R,
                                MultiExprArg Val);

  /// ActOnStringLiteral - The specified tokens were lexed as pasted string
  /// fragments (e.g. "foo" "bar" L"baz").
  ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                                Scope *UDLScope = nullptr);

  ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                       SourceLocation DefaultLoc,
                                       SourceLocation RParenLoc,
                                       Expr *ControllingExpr,
                                       ArrayRef<ParsedType> ArgTypes,
                                       ArrayRef<Expr *> ArgExprs);
  ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                        SourceLocation DefaultLoc,
                                        SourceLocation RParenLoc,
                                        Expr *ControllingExpr,
                                        ArrayRef<TypeSourceInfo *> Types,
                                        ArrayRef<Expr *> Exprs);

  // Binary/Unary Operators. 'Tok' is the token for the operator.
  ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                  Expr *InputExpr);
  ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
                          UnaryOperatorKind Opc, Expr *Input);
  ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
                          Expr *Input);

  QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

  ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                            SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind,
                                            SourceRange R);
  ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind);
  ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                           UnaryExprOrTypeTrait ExprKind,
                                           bool IsType, void *TyOrEx,
                                           const SourceRange &ArgRange);

  ExprResult CheckPlaceholderExpr(Expr *E);
  bool CheckVecStepExpr(Expr *E);

  // HLSL Change Begins
  bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType,
                                            SourceLocation Loc,
                                            UnaryExprOrTypeTrait ExprKind);
  // HLSL Change Ends

  bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
  bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
                                        SourceLocation OpLoc,
                                        SourceRange ExprRange,
                                        UnaryExprOrTypeTrait ExprKind);
  ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc,
                                          IdentifierInfo &Name,
                                          SourceLocation NameLoc,
                                          SourceLocation RParenLoc);
  ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                                 tok::TokenKind Kind, Expr *Input);

  ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                     Expr *Idx, SourceLocation RLoc);
  ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                             Expr *Idx, SourceLocation RLoc);
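  // For orientation (not from the original header): ActOnGenericSelectionExpr
  // implements C11 _Generic. The controlling expression's type is matched
  // against the listed association types and the corresponding expression is
  // selected, e.g.:
  //
  // \code
  //   #define type_name(x) _Generic((x), int     : "int",   \
  //                                      float   : "float", \
  //                                      default : "other")
  //   type_name(1.0f)  // yields "float"
  // \endcode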
  // This struct is for use by ActOnMemberAccess to allow
  // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
  // changing the access operator from a '.' to a '->' (to see if that is the
  // change needed to fix an error about an unknown member, e.g. when the class
  // defines a custom operator->).
  struct ActOnMemberAccessExtraArgs {
    Scope *S;
    UnqualifiedId &Id;
    Decl *ObjCImpDecl;
  };

  ExprResult BuildMemberReferenceExpr(
      Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
      CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
      const TemplateArgumentListInfo *TemplateArgs,
      ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

  ExprResult
  BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                           bool IsArrow, const CXXScopeSpec &SS,
                           SourceLocation TemplateKWLoc,
                           NamedDecl *FirstQualifierInScope, LookupResult &R,
                           const TemplateArgumentListInfo *TemplateArgs,
                           bool SuppressQualifierCheck = false,
                           ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

  ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

  bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                     const CXXScopeSpec &SS,
                                     const LookupResult &R);

  ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
                                      bool IsArrow, SourceLocation OpLoc,
                                      const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      NamedDecl *FirstQualifierInScope,
                                      const DeclarationNameInfo &NameInfo,
                                      const TemplateArgumentListInfo *TemplateArgs);

  ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                   tok::TokenKind OpKind, CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   UnqualifiedId &Member, Decl *ObjCImpDecl);

  void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
  bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl,
                               const FunctionProtoType *Proto,
                               ArrayRef<Expr *> Args,
                               SourceLocation RParenLoc,
                               bool ExecConfig = false);
  void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param,
                                const Expr *ArgExpr);

  /// ActOnCallExpr - Handle a call to Fn with the specified array of
  /// arguments. This provides the location of the left/right parens and a
  /// list of comma locations.
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                   SourceLocation LParenLoc,
                                   ArrayRef<Expr *> Arg,
                                   SourceLocation RParenLoc,
                                   Expr *Config = nullptr,
                                   bool IsExecConfig = false);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                           ParsedType &Ty, SourceLocation RParenLoc,
                           Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc, Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);
  /// \brief Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                  SourceLocation RParenLoc, Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc,
                                        bool GNUSyntax, ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc, Expr *CondExpr,
                                Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  void ActOnStmtExprError();

  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets; // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };

  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  OffsetOfComponent *CompPtr,
                                  unsigned NumComponents,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  OffsetOfComponent *CompPtr,
                                  unsigned NumComponents,
                                  SourceLocation RParenLoc);

  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr,
                             Expr *LHSExpr, Expr *RHSExpr,
                             SourceLocation RPLoc);

  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);

  // __null
  ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

  bool CheckCaseExpression(Expr *E);

  /// \brief Describes the result of an "if-exists" condition check.
  enum IfExistsResult {
    /// \brief The symbol exists.
    IER_Exists,
    /// \brief The symbol does not exist.
    IER_DoesNotExist,
    /// \brief The name is a dependent name, so the results will differ
    /// from one instantiation to the next.
    IER_Dependent,
    /// \brief An error occurred.
    IER_Error
  };
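  // For orientation (not from the original header): for
  // __builtin_offsetof(T, a.b[4]) the parser produces three OffsetOfComponent
  // entries (see the struct above): `.a` and `.b` with isBrackets == false
  // carrying U.IdentInfo, and `[4]` with isBrackets == true carrying the
  // index expression in U.E.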
  IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                                              const DeclarationNameInfo &TargetNameInfo);
  IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S,
                                              SourceLocation KeywordLoc,
                                              bool IsIfExists,
                                              CXXScopeSpec &SS,
                                              UnqualifiedId &Name);

  StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        NestedNameSpecifierLoc QualifierLoc,
                                        DeclarationNameInfo NameInfo,
                                        Stmt *Nested);
  StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists, CXXScopeSpec &SS,
                                        UnqualifiedId &Name, Stmt *Nested);

  //===------------------------- "Block" Extension ------------------------===//

  /// ActOnBlockStart - This callback is invoked when a block literal is
  /// started.
  void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockArguments - This callback allows processing of block arguments.
  /// If there are no arguments, this is still invoked.
  void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                           Scope *CurScope);

  /// ActOnBlockError - If there is an error parsing a block, this callback
  /// is invoked to pop the information about the block from the action impl.
  void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockStmtExpr - This is called when the body of a block statement
  /// literal was successfully completed. ^(int x){...}
  ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                                Scope *CurScope);

  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  // HLSL Change Starts
  //===---------------------------- HLSL Features -------------------------===//

  /// cbuffer/tbuffer
  llvm::SmallVector<Decl *, 1> HLSLBuffers;
  Decl *ActOnStartHLSLBuffer(Scope *bufferScope, bool cbuffer,
                             SourceLocation KwLoc, IdentifierInfo *Ident,
                             SourceLocation IdentLoc,
                             std::vector<hlsl::UnusualAnnotation *> &BufferAttributes,
                             SourceLocation LBrace);
  void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
  Decl *getActiveHLSLBuffer() const;
  void ActOnStartHLSLBufferView();
  bool IsOnHLSLBufferView();
  Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
                            DeclGroupPtrTy &dcl, bool iscbuf);
  // HLSL Change Ends

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               AttributeList *AttrList);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  CXXRecordDecl *getStdBadAlloc() const;

  /// \brief Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);
  /// \brief Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// \brief Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const CXXConstructorDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            AttributeList *AttrList);

  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
                                   SourceLocation UsingLoc, CXXScopeSpec &SS,
                                   DeclarationNameInfo NameInfo,
                                   AttributeList *AttrList,
                                   bool IsInstantiation,
                                   bool HasTypenameKeyword,
                                   SourceLocation TypenameLoc);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              bool HasUsingKeyword, SourceLocation UsingLoc,
                              CXXScopeSpec &SS, UnqualifiedId &Name,
                              AttributeList *AttrList, bool HasTypenameKeyword,
                              SourceLocation TypenameLoc);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              AttributeList *AttrList, TypeResult Type,
                              Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   CXXConstructorDecl *Constructor,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
  // if the constructor can be elidable?
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable, MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling the destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_ComputedNoexcept;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
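///
/// For instance (editorial note): in C++11 a defaulted destructor is
/// typically noexcept, e.g.
/// \code
///   struct S { std::string Str; };  // ~S() is implicitly noexcept
/// \endcode
/// unless some subobject's destructor is potentially-throwing.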
ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. 
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
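///
/// (Editorial example) In a full-expression such as
/// \code
///   std::string("tmp").size()
/// \endcode
/// the std::string temporary must be bound so that its destructor runs at
/// the end of the full-expression.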
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
                             IdentifierInfo &II, SourceLocation NameLoc,
                             Scope *S, CXXScopeSpec &SS,
                             ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc,
                             Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc,
                             Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             TypeSourceInfo *Ty,
                             Expr *E,
                             SourceRange AngleBrackets,
                             SourceRange Parens);

ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr,
                          SourceLocation RParenLoc);

/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// \brief Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
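  ///
  /// A hypothetical usage sketch (editorial, not from the original header):
  /// \code
  ///   Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, /*CXXThisTypeQuals=*/0);
  ///   // ... analyze an expression that may mention 'this' ...
  /// \endcode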
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt = nullptr);

/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
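///
/// (Editorial illustration) An expression such as
/// \code
///   new (Buf) Widget[N]{}
/// \endcode
/// carries placement arguments, a declarator, and an optional initializer,
/// which map onto the parameters below. ('Buf', 'Widget', and 'N' are
/// hypothetical names.)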
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Expr *ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer,
                       bool TypeMayContainAuto = true);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                             bool UseGlobal, QualType AllocType, bool IsArray,
                             MultiExprArg PlaceArgs,
                             FunctionDecl *&OperatorNew,
                             FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
                            DeclarationName Name, MultiExprArg Args,
                            DeclContext *Ctx,
                            bool AllowMissing, FunctionDecl *&Operator,
                            bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                     QualType Param1,
                                     QualType Param2 = QualType(),
                                     bool addRestrictAttr = false);

bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name, FunctionDecl* &Operator,
                              bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                            bool CanProvideSize,
                                            DeclarationName Name);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                          bool UseGlobal, bool ArrayForm,
                          Expr *Operand);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  bool ConvertToBoolean);

ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<ParsedType> Args,
                          SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<TypeSourceInfo *> Args,
                          SourceLocation RParenLoc);

/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               ParsedType LhsTy,
                               Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo,
                               Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
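/// (Editorial example) e.g. '__is_lvalue_expr(*p)'.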
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
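///
/// (Editorial illustration) In the Microsoft extension
/// \code
///   struct D : B { void f() { __super::f(); } };
/// \endcode
/// '__super::' nominates the base classes of D for lookup.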
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                              SourceLocation ColonColonLoc, CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                     bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  SourceLocation IdLoc,
                                  IdentifierInfo &II,
                                  ParsedType ObjectType);

bool BuildCXXNestedNameSpecifier(Scope *S,
                                 IdentifierInfo &Identifier,
                                 SourceLocation IdentifierLoc,
                                 SourceLocation CCLoc,
                                 QualType ObjectType,
                                 bool EnteringContext,
                                 CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup,
                                 bool *IsCorrectedToColon = nullptr);

/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
                                 IdentifierInfo &Identifier,
                                 SourceLocation IdentifierLoc,
                                 SourceLocation CCLoc,
                                 ParsedType ObjectType,
                                 bool EnteringContext,
                                 CXXScopeSpec &SS,
                                 bool ErrorRecoveryLookup = false,
                                 bool *IsCorrectedToColon = nullptr);

ExprResult ActOnDecltypeExpression(Expr *E);

bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
                                         const DeclSpec &DS,
                                         SourceLocation ColonColonLoc);

bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
                               IdentifierInfo &Identifier,
                               SourceLocation IdentifierLoc,
                               SourceLocation ColonLoc,
                               ParsedType ObjectType,
                               bool EnteringContext);

/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
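///
/// (Editorial example) In
/// \code
///   typename T::template apply<U>::type t;
/// \endcode
/// 'template apply<U>::' is the component handled here.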
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
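///
/// (Editorial illustration) For a C++14 init-capture such as
/// \code
///   auto f = [v = makeValue()] { return v; };
/// \endcode
/// the initializer 'makeValue()' (a hypothetical function) is analyzed here
/// and the type of 'v' is deduced from it.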
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
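///
/// (Editorial example) e.g. '@42' boxes the int literal into an NSNumber.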
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const 
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, bool NextIsLess = false); // HLSL Change - additional special case flag TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool NextIsLess = false); // HLSL Change - additional special case flag bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg 
                                      TemplateParameterLists,
                                      Declarator &D);

bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                            TemplateSpecializationKind NewTSK,
                                            NamedDecl *PrevDecl,
                                            TemplateSpecializationKind PrevTSK,
                                            SourceLocation PrevPtOfInstantiation,
                                            bool &SuppressNew);

bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
                    const TemplateArgumentListInfo &ExplicitTemplateArgs,
                                                  LookupResult &Previous);

bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
                         TemplateArgumentListInfo *ExplicitTemplateArgs,
                                         LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec,
                                      SourceLocation KWLoc,
                                      const CXXScopeSpec &SS,
                                      TemplateTy Template,
                                      SourceLocation TemplateNameLoc,
                                      SourceLocation LAngleLoc,
                                      ASTTemplateArgsPtr TemplateArgs,
                                      SourceLocation RAngleLoc,
                                      AttributeList *Attr);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec,
                                      SourceLocation KWLoc,
                                      CXXScopeSpec &SS,
                                      IdentifierInfo *Name,
                                      SourceLocation NameLoc,
                                      AttributeList *Attr);

DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument> &Converted,
                                        bool &HasDefaultArg);

/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// \brief The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// \brief The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// \brief The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
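///
/// (Editorial illustration) Given
/// \code
///   template<typename T, typename U = T*> struct X;
/// \endcode
/// checking 'X<int>' converts the argument list to {int, int*}, filling in
/// the defaulted argument.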
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// to determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter.
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
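// Example (illustrative): how two of the deduction failure codes above show
// up in ordinary code:
//
// \code
// template<typename T> void f(T, T);
// f(1, 2.5);  // TDK_Inconsistent: T deduced as both 'int' and 'double'
//
// template<typename T> void g(T);
// g(1, 2);    // TDK_TooManyArguments: more call arguments than parameters
// \endcode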
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
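// Example (illustrative): DeduceAutoType behaves like template argument
// deduction against an invented template parameter:
//
// \code
// auto i = 42;            // succeeds: deduced as 'int'
// auto *p = &i;           // succeeds: deduced as 'int *'
// auto bad = { 1, 2.5 };  // fails: inconsistent element types for the
//                         // deduced std::initializer_list
// \endcode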
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup.
A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief The stack of call expressions undergoing template instantiation. /// /// The top of this stack is used by a fixit instantiating unresolved /// function calls to fix the AST to match the textual change it prints. SmallVector<CallExpr *, 8> CallsUndergoingInstantiation; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, or a member thereof.
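// Example (illustrative): a recursive instantiation with no terminating
// specialization, which keeps pushing stack entries until the
// user-configurable depth limit is exceeded:
//
// \code
// template<int N> struct Fact {
//   static const int value = N * Fact<N - 1>::value; // no base case
// };
// const int x = Fact<8>::value; // recurses past 0 until the limit is hit
// \endcode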
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations.
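// Typical usage (a sketch of the pattern implied by the contract documented
// above; see the actual callers for the real thing):
//
// \code
// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
// if (Inst.isInvalid())
//   return true; // depth limit exceeded; already diagnosed
// // ... perform the instantiation; the destructor pops the stack entry.
// \endcode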
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
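// Example (illustrative): the kind of substitution failure a SFINAETrap
// observes without emitting a diagnostic:
//
// \code
// template<typename T> typename T::type probe(int); // fails for T = int:
//                                                   // no member 'type'
// template<typename T> int probe(...);
// int n = probe<int>(0); // the failure is trapped; probe(...) is chosen
// \endcode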
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc.
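// Example (illustrative): a local class member that lands on this local
// queue rather than the global PendingInstantiations queue:
//
// \code
// template<typename T> void outer() {
//   struct Local {
//     T get() const { return T(); } // must be instantiated in outer()'s
//   };                              // scope to see function-local names
//   Local l;
//   l.get();
// }
// \endcode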
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
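// Example (illustrative): the expansion SubstExprs performs when a pack
// appears in an expression list:
//
// \code
// void use(int, double);
// template<typename ...Ts> void wrap(Ts ...ts) {
//   use(ts...); // wrap(1, 2.5) substitutes to use(1, 2.5)
// }
// \endcode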
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty.
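// Example (illustrative): the Objective-C type forms these callbacks build:
//
// \code
// id<NSCopying, NSCoding> obj;                   // protocol-qualified 'id'
// NSArray<NSString *> *strings;                  // type arguments
// NSDictionary<NSString *, id<NSCopying>> *map;  // both combined
// \endcode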
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
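// Example (illustrative): the three message forms distinguished by
// ObjCMessageKind:
//
// \code
// [super init];            // ObjCSuperMessage
// [myObject description];  // ObjCInstanceMessage
// [NSString string];       // ObjCClassMessage
// \endcode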
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
                     Expr *Alignment, SourceLocation PragmaLoc,
                     SourceLocation LParenLoc, SourceLocation RParenLoc);

/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
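/// Illustrative input (editorial addition; the names are hypothetical):
/// \code
///   #pragma redefine_extname fopen fopen64
/// \endcode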
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
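/// Illustrative usage (editorial addition; CUDA source with a hypothetical
/// kernel name):
/// \code
///   __global__ void __launch_bounds__(256, 2) kernel(float *p);
/// \endcode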
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                 OpenMPClauseKind CKind);

public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);

/// \brief Check if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                         const DeclarationNameInfo &DirName, Scope *CurScope,
                         SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark the loop control variable, used in \p Init for loop initialization,
/// as private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                   const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                 ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                ArrayRef<Expr *> VarList);

/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
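/// Illustrative input (editorial addition):
/// \code
///   #pragma omp for
///   for (int i = 0; i < n; ++i)
///     a[i] = b[i];
/// \endcode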
StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. 
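/// Illustrative input (editorial addition): e.g. '\#pragma omp flush(a, b)'.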
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. 
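/// Illustrative input (editorial addition): e.g. the clause in
/// '\#pragma omp for schedule(dynamic, 4)', where 4 is the chunk size.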
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. 
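/// Illustrative input (editorial addition): e.g. 'copyin(x)' in
/// '\#pragma omp parallel copyin(x)', where 'x' is a threadprivate variable.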
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                        SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc);

/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
  /// \brief An implicit conversion.
  CCK_ImplicitConversion,
  /// \brief A C-style cast.
  CCK_CStyleCast,
  /// \brief A functional-style cast.
  CCK_FunctionalCast,
  /// \brief A cast other than a C-style cast.
  CCK_OtherCast
};

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                 = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
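// For example (editorial addition): passing a class type with a non-trivial
// copy constructor through the '...' of a C variadic function is undefined
// in C++98; the enum below distinguishes such cases from strictly valid ones.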
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(
    QualType LHSType, ExprResult &RHS, bool Diagnose = true,
    bool DiagnoseCFAudited = false);

/// \brief If the lhs type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
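/// Illustrative sketch (editorial addition; the types are hypothetical):
/// \code
///   typedef union { int *ip; float *fp; } arg_u
///       __attribute__((transparent_union));
///   void f(arg_u);
///   // f(&i) with 'int i' initializes the union's matching member,
///   // as if 'f' had been declared to take 'int *'.
/// \endcode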
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
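// For example (editorial addition): for 'f *= d' with 'float f' and
// 'double d', the converted type is 'double', the type in which the
// multiplication is performed before the result is stored back.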
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
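/// (For example, editorial addition: binding 'int &' directly to an 'int'
/// lvalue is Ref_Compatible, while binding 'const int &' to an 'int' lvalue
/// is Ref_Compatible_With_Added_Qualification.)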
  Ref_Compatible
};

ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             bool &DerivedToBase, bool &ObjCConversion,
                             bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                           QualType castType, Expr *&op,
                                           CheckedConversionKind CCK,
                                           bool DiagnoseCFAudited = false,
                                           BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
                               MultiExprArg Args, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
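/// Illustrative example (editorial addition): for
/// '[[NSMutableString alloc] init]' the related result type of -init lets
/// the send be typed as 'NSMutableString *' rather than plain 'id'.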
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \name Code completion
//@{

/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation.
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); // HLSL Change Starts - checking array subscript access to vector or matrix member void CheckHLSLArrayAccess(const Expr *expr); // HLSL Change ends void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
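// Illustrative input (editorial addition): e.g.
//   __builtin_shufflevector(v1, v2, 0, 2, 4, 6)
// whose constant shuffle indices are re-checked when the call is rebuilt
// during template instantiation.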
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                 SourceLocation BuiltinLoc,
                                 SourceLocation RParenLoc);

private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                   AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                            llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                 int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                              int ArgNum, unsigned ExpectedFieldNum,
                              bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);

public:
enum FormatStringType {
  FST_Scanf,
  FST_Printf,
  FST_NSString,
  FST_Strftime,
  FST_Strfmon,
  FST_Kprintf,
  FST_FreeBSDKPrintf,
  FST_OSTrace,
  FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
                       ArrayRef<const Expr *> Args, bool HasVAListArg,
                       unsigned format_idx, unsigned firstDataArg,
                       FormatStringType Type, bool inFunctionCall,
                       VariadicCallType CallType,
                       llvm::SmallBitVector &CheckedVarArgs);

bool FormatStringHasSArg(const StringLiteral *FExpr);

bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
bool CheckFormatArguments(const FormatAttr *Format,
                          ArrayRef<const Expr *> Args,
                          bool IsCXXMember,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange Range,
                          llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
                          bool HasVAListArg, unsigned format_idx,
                          unsigned firstDataArg, FormatStringType Type,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange range,
                          llvm::SmallBitVector &CheckedVarArgs);

void CheckAbsoluteValueFunction(const CallExpr *Call,
                                const FunctionDecl *FDecl,
                                IdentifierInfo *FnInfo);

void CheckMemaccessArguments(const CallExpr *Call,
                             unsigned BId,
                             IdentifierInfo *FnName);

void CheckStrlcpycatArguments(const CallExpr *Call,
                              IdentifierInfo *FnName);

void CheckStrncatArguments(const CallExpr *Call,
                           IdentifierInfo *FnName);

void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                        SourceLocation ReturnLoc,
                        bool isObjCMethod = false,
                        const AttrVec *Attrs = nullptr,
                        const FunctionDecl *FD = nullptr);

void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);

/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                        bool IsConstexpr = false);

void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                 Expr *Init);

/// \brief Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
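/// (Editorial example: a 'break' inside a GNU statement expression that
/// appears in a loop's condition is one such case.)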
void CheckBreakContinueBinding(Expr *E);

/// \brief Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const Expr * const *ExprArgs);

/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth,
                      TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ?
        OriginalLexicalContext : CurContext;
  }

  AvailabilityResult getCurContextAvailability() const;

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// \brief To be used for checking whether the arguments being passed to
  /// function exceeds the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
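// Illustrative sketch (not part of the original header): typical use of the
// RAII helper defined above. The function name and call site below are
// assumptions for illustration, not declarations from this file.
//
//   void walkUnevaluated(clang::Sema &S, clang::Expr *E) {
//     // Push an unevaluated context; the destructor pops it on every path,
//     // so declarations referenced in E are not marked as odr-used.
//     clang::EnterExpressionEvaluationContext Unevaluated(
//         S, clang::Sema::Unevaluated);
//     // ... analyze E ...
//   }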
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H

#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>

// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependent
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends

namespace llvm {
  class APSInt;
  template <typename ValueT> struct DenseMapInfo;
  template <typename ValueT, typename ValueInfoT> class DenseSet;
  class SmallBitVector;
  class InlineAsmIdentifierInfo;
}

namespace clang {
  class ADLResult;
  class ASTConsumer;
  class ASTContext;
  class ASTMutationListener;
  class ASTReader;
  class ASTWriter;
  class ArrayType;
  class AttributeList;
  class BlockDecl;
  class CapturedDecl;
  class CXXBasePath;
  class CXXBasePaths;
  class CXXBindTemporaryExpr;
  typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
  class CXXConstructorDecl;
  class CXXConversionDecl;
  class CXXDeleteExpr;
  class CXXDestructorDecl;
  class CXXFieldCollector;
  class CXXMemberCallExpr;
  class CXXMethodDecl;
  class CXXScopeSpec;
  class CXXTemporary;
  class CXXTryStmt;
  class CallExpr;
  class ClassTemplateDecl;
  class ClassTemplatePartialSpecializationDecl;
  class ClassTemplateSpecializationDecl;
  class VarTemplatePartialSpecializationDecl;
  class CodeCompleteConsumer;
  class CodeCompletionAllocator;
  class CodeCompletionTUInfo;
  class CodeCompletionResult;
  class Decl;
  class DeclAccessPair;
  class DeclContext;
  class DeclRefExpr;
  class DeclaratorDecl;
  class DeducedTemplateArgument;
  class DependentDiagnostic;
  class DesignatedInitExpr;
  class Designation;
  class EnableIfAttr;
  class EnumConstantDecl;
  class Expr;
  class ExtVectorType;
  class ExternalSemaSource;
  class FormatAttr;
  class FriendDecl;
  class FunctionDecl;
  class FunctionProtoType;
  class FunctionTemplateDecl;
  class ImplicitConversionSequence;
  class InitListExpr;
  class InitializationKind;
  class InitializationSequence;
  class InitializedEntity;
  class IntegerLiteral;
  class LabelStmt;
  class LambdaExpr;
  class LangOptions;
  class LocalInstantiationScope;
  class LookupResult;
  class MacroInfo;
  typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
  class ModuleLoader;
  class MultiLevelTemplateArgumentList;
  class NamedDecl;
  class ObjCCategoryDecl;
  class ObjCCategoryImplDecl;
  class ObjCCompatibleAliasDecl;
  class ObjCContainerDecl;
  class ObjCImplDecl;
  class ObjCImplementationDecl;
  class ObjCInterfaceDecl;
  class ObjCIvarDecl;
  template <class T> class ObjCList;
  class ObjCMessageExpr;
  class ObjCMethodDecl;
  class ObjCPropertyDecl;
  class ObjCProtocolDecl;
  class OMPThreadPrivateDecl;
  class OMPClause;
  class OverloadCandidateSet;
  class OverloadExpr;
  class ParenListExpr;
  class ParmVarDecl;
  class Preprocessor;
  class PseudoDestructorTypeStorage;
  class PseudoObjectExpr;
  class QualType;
  class StandardConversionSequence;
  class Stmt;
  class StringLiteral;
  class SwitchStmt;
  class TemplateArgument;
  class TemplateArgumentList;
  class TemplateArgumentLoc;
  class TemplateDecl;
  class TemplateParameterList;
  class TemplatePartialOrderingContext;
  class TemplateTemplateParmDecl;
  class Token;
  class TypeAliasDecl;
  class TypedefDecl;
  class TypedefNameDecl;
  class TypeLoc;
  class TypoCorrectionConsumer;
  class UnqualifiedId;
  class UnresolvedLookupExpr;
  class UnresolvedMemberExpr;
  class UnresolvedSetImpl;
  class UnresolvedSetIterator;
  class UsingDecl;
  class UsingShadowDecl;
  class ValueDecl;
  class VarDecl;
  class VarTemplateSpecializationDecl;
  class VisibilityAttr;
  class VisibleDeclConsumer;
  class IndirectFieldDecl;
  struct DeductionFailureInfo;
  class TemplateSpecCandidateSet;

namespace sema {
  class AccessedEntity;
  class BlockScopeInfo;
  class CapturedRegionScopeInfo;
  class CapturingScopeInfo;
  class CompoundScopeInfo;
  class DelayedDiagnostic;
  class DelayedDiagnosticPool;
  class FunctionScopeInfo;
  class LambdaScopeInfo;
  class PossiblyUnreachableDiag;
  class TemplateDeductionInfo;
}

namespace threadSafety {
  class BeforeSet;
  void threadSafetyCleanup(BeforeSet* Cache);
}

  // FIXME: No way to easily map from TemplateTypeParmTypes to
  // TemplateTypeParmDecls, so we have this horrible PointerUnion.
  typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                    SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///\brief Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///\brief Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    // We are about to link these. It is now safe to compute the linkage of
    // the new decl. If the new decl has external linkage, we will
    // link it with the hidden decl (which also has external linkage) and
    // it will keep having external linkage. If it has internal linkage, we
    // will not link it. Since it has no previous decls, it will remain
    // with internal linkage.
    if (getLangOpts().ModulesHideInternalLinkage)
      return isVisible(Old) || New->isExternallyVisible();
    return true;
  }

public:
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;
  typedef OpaquePtr<QualType> TypeTy;

  OpenCLOptions OpenCLFeatures;
  FPOptions FPFeatures;

  const LangOptions &LangOpts;
  Preprocessor &PP;
  ASTContext &Context;
  ASTConsumer &Consumer;
  DiagnosticsEngine &Diags;
  SourceManager &SourceMgr;

  /// \brief Flag indicating whether or not to collect detailed statistics.
  bool CollectStats;

  /// \brief Code-completion consumer.
  CodeCompleteConsumer *CodeCompleter;

  /// CurContext - This is the current declaration context of parsing.
  DeclContext *CurContext;

  /// \brief Generally null except when we temporarily switch decl contexts,
  /// like in \see ActOnObjCTemporaryExitContainerContext.
  DeclContext *OriginalLexicalContext;

  /// VAListTagName - The declaration name corresponding to __va_list_tag.
  /// This is used as part of a hack to omit that class from ADL results.
  DeclarationName VAListTagName;

  /// PackContext - Manages the stack for \#pragma pack. An alignment
  /// of 0 indicates default alignment.
  void *PackContext; // Really a "PragmaPackStack*"

  bool MSStructPragmaOn; // True when \#pragma ms_struct on

  /// \brief Controls member pointer representation format under the MS ABI.
  LangOptions::PragmaMSPointersToMembersKind
      MSPointerToMemberRepresentationMethod;

  // HLSL Change Begin
  // The HLSL rewriter doesn't define a default matrix pack,
  // so we must preserve the lack of annotations to avoid changing semantics.
  bool HasDefaultMatrixPack = false;
  // Uses of #pragma pack_matrix change the default pack.
  bool DefaultMatrixPackRowMajor = false;
  // HLSL Change End.

  enum PragmaVtorDispKind {
    PVDK_Push,  ///< #pragma vtordisp(push, mode)
    PVDK_Set,   ///< #pragma vtordisp(mode)
    PVDK_Pop,   ///< #pragma vtordisp(pop)
    PVDK_Reset  ///< #pragma vtordisp()
  };

  enum PragmaMsStackAction {
    PSK_Reset,    // #pragma ()
    PSK_Set,      // #pragma ("name")
    PSK_Push,     // #pragma (push[, id])
    PSK_Push_Set, // #pragma (push[, id], "name")
    PSK_Pop,      // #pragma (pop[, id])
    PSK_Pop_Set,  // #pragma (pop[, id], "name")
  };

  /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI. Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  ///
  /// The stack always has at least one element in it.
  SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;

  /// Stack of active SEH __finally scopes. Can be empty.
  SmallVector<Scope*, 2> CurrentSEHFinally;

  /// \brief Source location for newly created implicit MSInheritanceAttrs
  SourceLocation ImplicitMSInheritanceAttrLoc;

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      Slot(llvm::StringRef StackSlotLabel,
           ValueType Value,
           SourceLocation PragmaLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation) {}
    };
    void Act(SourceLocation PragmaLocation,
             PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel,
             ValueType Value);
    explicit PragmaStack(const ValueType &Value)
        : CurrentValue(Value) {}
    SmallVector<Slot, 2> Stack;
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"

  /// \brief This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// \brief Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// ExprNeedsCleanups - True if the current evaluation context
  /// requires cleanups to be run at its conclusion.
  bool ExprNeedsCleanups;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// \brief Store a list of either DeclRefExprs or MemberExprs
  /// that contain a reference to a variable (constant) that may or may not
  /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
  /// and discarded value conversions have been applied to all subexpressions
  /// of the enclosing full expression. This is cleared at the end of each
  /// full expression.
  llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;

  /// \brief Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  ///
  /// This array is never empty. Clients should ignore the first
  /// element, which is used to cache a single FunctionScopeInfo
  /// that's used to parse every top-level function.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;
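  // Illustrative sketch (not part of the original header): the intended stack
  // discipline of PragmaStack<ValueType> above. The variable names and values
  // are assumptions for illustration.
  //
  //   PragmaStack<StringLiteral *> SegStack(nullptr); // CurrentValue: default
  //   SegStack.Act(Loc, PSK_Push_Set, "lbl", Lit); // save old value, set Lit
  //   SegStack.Act(Loc, PSK_Pop, "lbl", nullptr); // restore the saved value
  //
  // PSK_Set only replaces CurrentValue; the push/pop actions are what grow
  // and shrink Stack.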
  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;

  /// \brief Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// \brief Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// \brief Delete-expressions to be analyzed at the end of translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// \brief Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// \brief All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// \brief The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// \brief All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// \brief All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedExceptionSpecChecks;

  /// \brief All the members seen during a class definition which were both
  /// explicitly defaulted and had explicitly-specified exception
  /// specifications, along with the function type containing their
  /// user-specified exception specification. Those exception specifications
  /// were overridden with the default specifications, but we still need to
  /// check whether they are compatible with the default specification, and
  /// we can't do that until the nesting set of class definitions is complete.
  SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
    DelayedDefaultedMemberExceptionSpecs;

  typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
    LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// \brief Callback to the parser to parse templated functions when needed.
  typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
  typedef void LateTemplateParserCleanupCB(void *P);
  LateTemplateParserCB *LateTemplateParser;
  LateTemplateParserCleanupCB *LateTemplateParserCleanup;
  void *OpaqueParser;

  void SetLateTemplateParser(LateTemplateParserCB *LTP,
                             LateTemplateParserCleanupCB *LTPCleanup,
                             void *P) {
    LateTemplateParser = LTP;
    LateTemplateParserCleanup = LTPCleanup;
    OpaqueParser = P;
  }

  class DelayedDiagnostics;

  class DelayedDiagnosticsState {
    sema::DelayedDiagnosticPool *SavedPool;
    friend class Sema::DelayedDiagnostics;
  };
  typedef DelayedDiagnosticsState ParsingDeclState;
  typedef DelayedDiagnosticsState ProcessingContextState;

  /// A class which encapsulates the logic for delaying diagnostics
  /// during parsing and other processing.
  class DelayedDiagnostics {
    /// \brief The current pool of diagnostics into which delayed
    /// diagnostics should go.
    sema::DelayedDiagnosticPool *CurPool;

  public:
    DelayedDiagnostics() : CurPool(nullptr) {}

    /// Adds a delayed diagnostic.
    void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

    /// Determines whether diagnostics should be delayed.
    bool shouldDelayDiagnostics() { return CurPool != nullptr; }

    /// Returns the current delayed-diagnostics pool.
    sema::DelayedDiagnosticPool *getCurrentPool() const {
      return CurPool;
    }

    /// Enter a new scope. Access and deprecation diagnostics will be
    /// collected in this pool.
    DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = &pool;
      return state;
    }

    /// Leave a delayed-diagnostic state that was previously pushed.
    /// Do not emit any of the diagnostics. This is performed as part
    /// of the bookkeeping of popping a pool "properly".
    void popWithoutEmitting(DelayedDiagnosticsState state) {
      CurPool = state.SavedPool;
    }

    /// Enter a new scope where access and deprecation diagnostics are
    /// not delayed.
    DelayedDiagnosticsState pushUndelayed() {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = nullptr;
      return state;
    }

    /// Undo a previous pushUndelayed().
    void popUndelayed(DelayedDiagnosticsState state) {
      assert(CurPool == nullptr);
      CurPool = state.SavedPool;
    }
  } DelayedDiagnostics;

  /// A RAII object to temporarily push a declaration context.
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
    }

    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };
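  // Illustrative sketch (not part of the original header): how ContextRAII is
  // typically used when Sema must act inside another DeclContext. The member
  // function below is an assumption for illustration.
  //
  //   void Sema::defineImplicitMember(CXXMethodDecl *MD) {
  //     ContextRAII SavedContext(*this, MD); // CurContext = MD, diags undelayed
  //     // ... build the synthesized declarations inside MD ...
  //   } // destructor restores CurContext and the delayed-diagnostic pool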
  /// \brief RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
    }

    ~SynthesizedFunctionScope() {
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before declared. rare. may alias another
  /// identifier, declared or undeclared
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// \brief Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// \brief The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// \brief The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// \brief The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// \brief Caches identifiers/selectors for NSFoundation APIs.
  // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change

  /// \brief The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// \brief The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// \brief Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// \brief Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// \brief The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// \brief Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// \brief The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// \brief The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// \brief The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// \brief The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// \brief The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// \brief id<NSCopying> type.
  QualType QIDNSCopying;

  /// \brief will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// \brief counter for internal MS Asm label names.
  unsigned MSAsmLabelNameCounter;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields. This is really a
  bool AllowAbstractFieldReference;

  /// \brief Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum ExpressionEvaluationContext {
    /// \brief The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// \brief The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// \brief The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// \brief The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// \brief The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// \brief Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// \brief The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// \brief Whether the enclosing context needed a cleanup.
    bool ParentNeedsCleanups;

    /// \brief Whether we are in a decltype expression.
    bool IsDecltype;

    /// \brief The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// \brief The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

    /// \brief The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// \brief The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// \brief The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;

    /// \brief If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// \brief If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      bool ParentNeedsCleanups,
                                      Decl *ManglingContextDecl,
                                      bool IsDecltype)
      : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
        IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
        NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }

    /// \brief Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == Unevaluated || Context == UnevaluatedAbstract;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// \brief Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *getCurrentMangleNumberContext(
      const DeclContext *DC, Decl *&ManglingContextDecl);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// \brief A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

  /// \brief The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// \brief The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

  /// \brief A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema& S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }
  private:
    Sema& S;
    bool OldFPContractState : 1;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// \brief Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// \brief Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// \brief Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// \brief Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// \brief Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
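  // Illustrative sketch (not part of the original header): composing a
  // diagnostic through Diag(). The check and diagnostic ID below are
  // assumptions for illustration.
  //
  //   if (T->isIncompleteType())
  //     Diag(Loc, diag::err_incomplete_type) << T << E->getSourceRange();
  //
  // The temporary SemaDiagnosticBuilder emits the diagnostic in its
  // destructor, appending the template-instantiation stack when relevant.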
  /// \brief Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  void ActOnEndOfTranslationUnit();

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// \brief This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD,
                               CapturedRegionKind K);
  void
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       const BlockExpr *blkExpr = nullptr);

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const {
    if (FunctionScopes.empty())
      return nullptr;

    for (int e = FunctionScopes.size()-1; e >= 0; --e) {
      if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
        continue;
      return FunctionScopes[e];
    }
    return nullptr;
  }

  template <typename ExprT>
  void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
    if (!isUnevaluatedContext())
      getCurFunction()->recordUseOfWeak(E, IsRead);
  }

  void PushCompoundScope();
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// \brief Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// \brief Retrieve the current lambda scope info, if any.
  sema::LambdaScopeInfo *getCurLambda();

  /// \brief Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// \brief Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

  unsigned deduceWeakPropertyFromType(QualType T) {
    if ((getLangOpts().getGC() != LangOptions::NonGC &&
         T.isObjCGCWeak()) ||
        (getLangOpts().ObjCAutoRefCount &&
         T.getObjCLifetime() == Qualifiers::OCL_Weak))
      return ObjCDeclSpec::DQ_PR_weak;
    return 0;
  }

  /// \brief Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T,
                             MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc,
                                  DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T,
                                 SourceLocation Loc, DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
  TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                                 TypeSourceInfo *ReturnTypeInfo);

  /// \brief Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);
  CanThrowResult canThrow(const Expr *E);
  const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                                const FunctionProtoType *FPT);
  void UpdateExceptionSpec(FunctionDecl *FD,
                           const FunctionProtoType::ExceptionSpecInfo &ESI);
  bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
  bool CheckDistantExceptionSpec(QualType T);
  bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
  bool CheckEquivalentExceptionSpec(
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool CheckEquivalentExceptionSpec(
      const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc,
      bool *MissingExceptionSpecification = nullptr,
      bool *MissingEmptyExceptionSpecification = nullptr,
      bool AllowNoexceptAllMatchWithNoSpec = false,
      bool IsOperatorNew = false);
  bool CheckExceptionSpecSubset(
      const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
      const FunctionProtoType *Superset, SourceLocation SuperLoc,
      const FunctionProtoType *Subset, SourceLocation SubLoc);
  bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
      const FunctionProtoType *Target, SourceLocation TargetLoc,
      const FunctionProtoType *Source, SourceLocation SourceLoc);

  TypeResult ActOnTypeName(Scope *S, Declarator &D);

  /// \brief The parser has parsed the context-sensitive type 'instancetype'
  /// in an Objective-C message declaration. Return the appropriate type.
  ParsedType ActOnObjCInstanceType(SourceLocation Loc);

  /// \brief Abstract class used to diagnose incomplete types.
  struct TypeDiagnoser {
    bool Suppressed;

    TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char * getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              llvm::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      if (Suppressed)
        return;
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, llvm::index_sequence_for<Ts...>());
      DB << T;
    }
  };

private:
  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser &Diagnoser);

  VisibleModuleSet VisibleModules;
  llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;

  Module *CachedFakeTopLevelModule;

public:
  /// \brief Get the module owning an entity.
  Module *getOwningModule(Decl *Entity);

  /// \brief Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);

  bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }
  bool hasVisibleMergedDefinition(NamedDecl *Def);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  /// List of decls defined in a function prototype. This contains EnumConstants
  /// that incorrectly end up in translation unit scope because there is no
  /// function to pin them on. ActOnFunctionDeclarator reads this list and
  /// patches them into the FunctionDecl.
  std::vector<NamedDecl*> DeclsInPrototypeScope;

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false,
                         bool HasTrailingDot = false,
                         ParsedType ObjectType = ParsedType(),
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                               SourceLocation IILoc,
                               Scope *S,
                               CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool AllowClassTemplates = false);

  /// \brief For compatibility with MSVC, we delay parsing of some default
  /// template type arguments until instantiation time. Emits a warning and
  /// returns a synthesized DependentNameType that isn't really dependent on any
  /// other template arguments.
  ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
                                            SourceLocation NameLoc);
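  // Illustrative sketch (not part of the original header): the variadic
  // RequireCompleteType overload forwards extra arguments to the diagnostic
  // via BoundTypeDiagnoser, which streams them before the type itself. The
  // call below is an assumption for illustration.
  //
  //   if (RequireCompleteType(Loc, FieldTy, diag::err_field_incomplete))
  //     return true; // a diagnostic was already emitted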
  /// \brief Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate
  };

  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *) : Kind(NC_Keyword) { }

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };

  /// \brief Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification
  ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
               SourceLocation NameLoc, const Token &NextToken,
               bool IsAddressOfOperand,
               std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name,
                                    SourceLocation Loc);
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
  void CheckShadow(Scope *S, VarDecl *D);
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                  LookupResult &Previous, bool &Redeclaration);

  // HLSL Change Starts
  // This enumeration is used to determine whether a variable declaration
  // should shadow a prior declaration rather than merging.
enum ShadowMergeState { ShadowMergeState_Disallowed, // shadowing is not allowed ShadowMergeState_Possible, // shadowing is possible (but may not occur) ShadowMergeState_Effective // the declaration should shadow a prior one }; // HLSL Change Ends NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. 
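///
/// Illustrative sketch (hypothetical caller): after finishing a declarator
/// group, the parser would hand the declarations over so that any preceding
/// documentation comments can be attached:
/// \code
///   SmallVector<Decl *, 8> DeclsInGroup;
///   // ... populate DeclsInGroup while parsing ...
///   Actions.ActOnDocumentableDecls(DeclsInGroup);
/// \endcode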
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule.
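///
/// Illustrative pairing (not from the original docs): module parsing is
/// expected to bracket each submodule with the begin/end callbacks:
/// \code
///   Actions.ActOnModuleBegin(DirectiveLoc, Mod); // entering the submodule
///   // ... parse the submodule's contents ...
///   Actions.ActOnModuleEnd(DirectiveLoc, Mod);   // leaving it again
/// \endcode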
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, 
Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
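///
/// Illustrative sketch (hypothetical parser-side use):
/// \code
///   Sema::SkipBodyInfo SkipInfo =
///       Actions.shouldSkipAnonEnumBody(CurScope, FirstEnumeratorII, IILoc);
///   if (SkipInfo.ShouldSkip) {
///     // Reuse SkipInfo.Previous as the already-seen definition instead of
///     // re-parsing the enumerator list.
///   }
/// \endcode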
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. Return true if a new attribute was added. 
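// Illustrative pattern (hypothetical caller, not from this header): each
// merge method returns the merged attribute to attach (a non-null result is
// the "true" case), or null when the existing attribute already covers or
// conflicts with the new one:
//
//   if (SectionAttr *NewAttr =
//           S.mergeSectionAttr(D, Range, Name, AttrSpellingListIndex))
//     D->addAttr(NewAttr);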
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
// TODO: make this a typesafe union. typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate. void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-template overload // candidates identified by the expression Expr. void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the
enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr.
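///
/// Illustrative sketch (hypothetical recovery path): once a delayed typo has
/// finally been diagnosed, its per-expression state is looked up and released:
/// \code
///   const Sema::TypoExprState &State = SemaRef.getTypoExprState(TE);
///   State.DiagHandler(Correction); // emit the deferred diagnostic
///   SemaRef.clearDelayedTypo(TE);  // drop the consumer and callbacks
/// \endcode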
void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
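// Illustrative call site (hypothetical): once a declaration has been formed,
// its parsed attributes are funneled through the dispatcher in one call,
//
//   Actions.ProcessDeclAttributes(S, NewDecl, DeclaratorInfo);
//
// which walks the attribute list and routes each attribute to its handler.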
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation's declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether the types of two methods /// match and returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// \brief - Returns instance or factory methods in the global method pool for /// the given selector. If no such method, or only one method, is found, returns /// false; otherwise, returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// \brief - Returns the method that best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance); /// \brief Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
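///
/// Illustrative sketch (hypothetical): while processing an Objective-C
/// container, each parsed method would be routed to the matching pool:
/// \code
///   for (ObjCMethodDecl *MD : Container->methods()) {
///     if (MD->isInstanceMethod())
///       SemaRef.AddInstanceMethodToGlobalPool(MD);
///     else
///       SemaRef.AddFactoryMethodToGlobalPool(MD);
///   }
/// \endcode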
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                 QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
    ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                        /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();

StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// \brief An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S): S(S) {
    S.ActOnStartOfCompoundStmt();
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
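/// A minimal usage sketch (illustrative only): the destructor pops the
/// function scope on every early-exit path unless disable() was called first.
/// \code
///   FunctionScopeRAII FScope(S);
///   if (BodyInvalid)
///     return StmtError();   // scope popped here by ~FunctionScopeRAII
///   FScope.disable();       // success path keeps the pushed scope
/// \endcode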
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope 
*S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
                       const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
                       bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                     Decl *LambdaContextDecl = nullptr,
                                     bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                     ReuseLambdaContextDecl_t,
                                     bool IsDecltype = false);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
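/// A minimal call sketch (illustrative only):
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Failed = S.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit,
///       /*EllipsisLoc=*/SourceLocation(), /*BuildAndDiagnose=*/true,
///       CaptureType, DeclRefType, /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode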
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult 
ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); // HLSL Change Begins bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc, UnaryExprOrTypeTrait ExprKind); // HLSL Change Ends bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
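// An illustrative sketch (not from this header) of the recovery this enables:
//
//   struct Ptr { Widget *operator->(); };
//   Ptr P;
//   P.draw();   // member lookup on Ptr fails; Sema retries as P->draw()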
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
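/// For example (illustrative), an OpenCL-style vector literal such as
/// \code
///   (float4)(1.0f, 2.0f, 3.0f, 4.0f)
/// \endcode
/// reaches this entry point with the parenthesized list as \p E and the
/// vector type described by \p TInfo.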
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation Loc,
                                      bool GNUSyntax,
                                      ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets;  // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                OffsetOfComponent *CompPtr,
                                unsigned NumComponents,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                OffsetOfComponent *CompPtr,
                                unsigned NumComponents,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                           Expr *CondExpr, Expr *LHSExpr,
                           Expr *RHSExpr, SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// \brief The symbol exists.
  IER_Exists,

  /// \brief The symbol does not exist.
  IER_DoesNotExist,

  /// \brief The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// \brief An error occurred.
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); // HLSL Change Starts //===---------------------------- HLSL Features -------------------------===// /// cbuffer/tbuffer llvm::SmallVector<Decl*, 1> HLSLBuffers; Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc, IdentifierInfo *Ident, SourceLocation IdentLoc, std::vector<hlsl::UnusualAnnotation *>& BufferAttributes, SourceLocation LBrace); void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace); Decl* getActiveHLSLBuffer() const; void ActOnStartHLSLBufferView(); bool IsOnHLSLBufferView(); Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc, DeclGroupPtrTy &dcl, bool iscbuf); // HLSL Change Ends //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
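/// A minimal call sketch (illustrative only):
/// \code
///   QualType ListTy = S.BuildStdInitializerList(S.Context.IntTy, Loc);
///   // ListTy is std::initializer_list<int>, or a null type on error.
/// \endcode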
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. 
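/// A minimal usage sketch (illustrative only; the loop source is
/// hypothetical):
/// \code
///   ImplicitExceptionSpecification Spec(S);
///   for (CXXMethodDecl *Callee : CalleesOfDefaultedMember)
///     Spec.CalledDecl(Loc, Callee);
///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
/// \endcode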
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
    : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_ComputedNoexcept;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. 
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed-in expression.
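/// A minimal usage sketch (illustrative only):
/// \code
///   Expr *Wrapped = S.MaybeBindToTemporary(TempExpr).get();
///   // Wrapped is a CXXBindTemporaryExpr when TempExpr's type has a
///   // non-trivial destructor; otherwise it is TempExpr unchanged.
/// \endcode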
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);

ParsedType getDestructorName(SourceLocation TildeLoc,
                             IdentifierInfo &II, SourceLocation NameLoc,
                             Scope *S, CXXScopeSpec &SS,
                             ParsedType ObjectType,
                             bool EnteringContext);

ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast
/// expressions.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc,
                             Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc,
                             Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             TypeSourceInfo *Ty,
                             Expr *E,
                             SourceRange AngleBrackets,
                             SourceRange Parens);

ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr,
                          SourceLocation RParenLoc);

/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// \brief Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
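  /// A minimal usage sketch (illustrative only; ParseDelayedNoexcept stands
  /// in for whatever work needs a valid 'this' type):
  /// \code
  ///   {
  ///     Sema::CXXThisScopeRAII ThisScope(S, RD, Qualifiers::Const);
  ///     ExprResult E = ParseDelayedNoexcept();
  ///   } // the previous 'this' override is restored here
  /// \endcode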
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
    bool BuildAndDiagnose = true,
    const unsigned *const FunctionScopeIndexToStopAt = nullptr);

/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
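/// For example (an illustrative mapping, not from this header), for
/// \code
///   ::new (Buf) Widget[N] {}
/// \endcode
/// the leading '::' sets \p UseGlobal, '(Buf)' arrives as \p PlacementArgs,
/// the declarator covers 'Widget[N]', and the braced list is passed as the
/// initializer.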
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Expr *ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer,
                       bool TypeMayContainAuto = true);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                             bool UseGlobal, QualType AllocType, bool IsArray,
                             MultiExprArg PlaceArgs,
                             FunctionDecl *&OperatorNew,
                             FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
                            DeclarationName Name, MultiExprArg Args,
                            DeclContext *Ctx,
                            bool AllowMissing, FunctionDecl *&Operator,
                            bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                     QualType Param1,
                                     QualType Param2 = QualType(),
                                     bool addRestrictAttr = false);

bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name, FunctionDecl* &Operator,
                              bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                            bool CanProvideSize,
                                            DeclarationName Name);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                          bool UseGlobal, bool ArrayForm,
                          Expr *Operand);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  bool ConvertToBoolean);

ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<ParsedType> Args,
                          SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<TypeSourceInfo *> Args,
                          SourceLocation RParenLoc);

/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               ParsedType LhsTy,
                               Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo,
                               Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
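/// For example (illustrative), the Embarcadero-style expression trait
/// \code
///   __is_lvalue_expr(*Ptr)
/// \endcode
/// arrives here with ET_IsLValueExpr as \p OET and '*Ptr' as \p Queried.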
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing a /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
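/// For illustration, a sketch of code that reaches this entry point; the
/// nested-name-specifier here is 'A<T>::template B<0>::':
/// \code
///   template<typename T> struct A {
///     template<int N> struct B { typedef int type; };
///   };
///   template<typename T> typename A<T>::template B<0>::type f();
/// \endcode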
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
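/// For illustration (sketch): in
/// \code
///   int n = 0;
///   auto f = [m = n + 1] { return m; }; // C++14 init-capture
/// \endcode
/// the initializer 'n + 1' undergoes an lvalue-to-rvalue conversion on 'n',
/// since 'm' is captured by value rather than by reference.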
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
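/// For illustration, literals such as \c @42 and \c @3.14 are boxed this
/// way (a sketch of the surface syntax, not an exhaustive list).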
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or a C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
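/// For illustration (a sketch): a translation unit containing
/// \code
///   struct S { virtual void f(); };
///   void g() { S s; } // constructing 's' uses S's vtable
/// \endcode
/// may record S's vtable in VTableUses, which this routine later defines if
/// a definition turns out to be required.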
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, bool NextIsLess = false); // HLSL Change - additional special case flag TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool NextIsLess = false); // HLSL Change - additional special case flag bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg 
TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise.
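/// For illustration (sketch): for
/// \code
///   template<typename T, typename U = T*> struct X {};
///   X<int> x; // Converted ends up holding {int, int*}
/// \endcode
/// the defaulted argument for 'U' is substituted and appended to the
/// converted argument list.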
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
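/// For illustration (hypothetical values): given
/// \code
///   template<typename T> void f(T&&);
///   void g() { f(42); } // OriginalParamType = 'T&&', ArgIdx = 0,
///                       // OriginalArgType = 'int'
/// \endcode
/// the recorded triple lets the deduced specialization be checked back
/// against the argument it was deduced from.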
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
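/// For illustration (sketch):
/// \code
///   auto i = 42;         // succeeds: 'auto' deduced as 'int'
///   auto e = { 1, 2.0 }; // fails: inconsistent initializer-list elements
/// \endcode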
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup.
/// A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief The stack of call expressions undergoing template instantiation. /// /// The top of this stack is used by a fixit instantiating unresolved /// function calls to fix the AST to match the textual change it prints. SmallVector<CallExpr *, 8> CallsUndergoingInstantiation; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a /// function parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations.
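// --- Illustrative sketch (editor's aside; not part of the original header). ---
// Typical use of this RAII type: push an instantiation record, then bail out
// if the depth limit was hit; the destructor pops the record automatically.
// 'SemaRef', 'PointOfInstantiation', and 'Entity' are placeholders here.
//
//   InstantiatingTemplate Inst(SemaRef, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true; // too deep; an error has already been emitted
//   // ... perform the instantiation; ~InstantiatingTemplate pops it ...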
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
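// --- Illustrative sketch (editor's aside; not part of the original header). ---
// SFINAETrap (defined above) is used to attempt an operation whose failures
// must be swallowed rather than diagnosed. 'SemaRef' is a placeholder for the
// enclosing Sema instance:
//
//   SFINAETrap Trap(SemaRef);
//   // ... attempt a substitution or conversion ...
//   if (Trap.hasErrorOccurred())
//     /* treat as a SFINAE failure, not a hard error */;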
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc.
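// --- Illustrative sketch (editor's aside; not part of the original header). ---
// SavePendingInstantiationsAndVTableUsesRAII (defined above) swaps the
// pending-work queues out for a scope, so that work queued inside the scope
// can be drained independently. A plausible pattern (an assumption, not a
// documented contract) is:
//
//   {
//     SavePendingInstantiationsAndVTableUsesRAII Saved(*this, /*Enabled=*/true);
//     // ... queue and perform instantiations in isolation ...
//     PerformPendingInstantiations(); // drain before the destructor restores
//   }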
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); enum PragmaPackKind { PPK_Default, // #pragma pack([n]) PPK_Show, // #pragma pack(show), only supported by MSVC. PPK_Push, // #pragma pack(push, [identifier], [n]) PPK_Pop // #pragma pack(pop, [identifier], [n]) }; enum PragmaMSStructKind { PMSST_OFF, // #pragma ms_struct off PMSST_ON // #pragma ms_struct on }; enum PragmaMSCommentKind { PCK_Unknown, PCK_Linker, // #pragma comment(linker, ...) PCK_Lib, // #pragma comment(lib, ...) PCK_Compiler, // #pragma comment(compiler, ...) PCK_ExeStr, // #pragma comment(exestr, ...) PCK_User // #pragma comment(user, ...) }; /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc); /// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...). void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// \brief Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// \brief Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// \brief Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT void ActOnPragmaFPContract(tok::OnOffSwitch OOS); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// \brief Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// \brief Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// \brief Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// \brief Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); // OpenMP directives and clauses. private: void *VarDataSharingAttributesStack; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind); public: /// \brief Checks if the specified variable is used in one of the private /// clauses in OpenMP constructs. bool IsOpenMPCapturedVar(VarDecl *VD); /// \brief Check if the specified variable is used in a private clause in /// OpenMP constructs. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level); ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// \brief End analysis of clauses. void EndOpenMPClause(); /// \brief Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// \brief Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '\#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// \brief End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement.
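// --- Illustrative sketch (editor's aside; not part of the original header). ---
// The parser is expected to drive the OpenMP hooks above roughly as follows
// for a directive with an associated statement (the exact order is an
// assumption based on the declarations in this header, not a documented
// contract):
//
//   StartOpenMPDSABlock(OMPD_parallel, DirName, CurScope, Loc);
//   // ... ActOnOpenMP*Clause calls accumulate 'Clauses' ...
//   ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
//   StmtResult Body = /* parse the associated statement */;
//   Body = ActOnOpenMPRegionEnd(Body, Clauses);
//   StmtResult Dir =
//       ActOnOpenMPParallelDirective(Clauses, Body.get(), StartLoc, EndLoc);
//   EndOpenMPDSABlock(Dir.get());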
StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. 
CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion. enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type.
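// --- Illustrative sketch (editor's aside; not part of the original header). ---
// Callers typically funnel an operand through the conversion helpers above and
// check each ExprResult as they go, e.g.:
//
//   ExprResult Res = UsualUnaryConversions(E);
//   if (Res.isInvalid())
//     return ExprError();
//   E = Res.get(); // E now has the promoted / decayed type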
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy.
/// This returns true if the conversion was invalid, or false if the
/// conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                   ExprResult &RHS,
                                                   bool Diagnose = true,
                                                   bool DiagnoseCFAudited = false);

/// \brief If the lhs type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit,
                                     ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                       = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
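// Illustrative aside (not part of the original header): a minimal sketch of
// how the assignment-checking entry points above fit together. The helper
// name checkAndDiagnoseAssignment is hypothetical, and AA_Assigning is
// clang's assignment action kind, assumed here; only the two Sema methods
// are from the surrounding declarations.
#if 0
static bool checkAndDiagnoseAssignment(Sema &S, QualType LHSType,
                                       ExprResult &RHS, SourceLocation Loc) {
  // Classify the conversion; this also rewrites RHS with any implicit casts.
  Sema::AssignConvertType ConvTy =
      S.CheckSingleAssignmentConstraints(LHSType, RHS);
  if (RHS.isInvalid())
    return true;
  // Emit whatever extension warning or error the classification implies;
  // returns true if the assignment must be rejected.
  return S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
                                    RHS.get()->getType(), RHS.get(),
                                    AA_Assigning);
}
#endif

/// type checking binary operators (subroutines of CreateBuiltinBinOp).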
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
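// Illustrative aside (not part of the original header): roughly how a
// binary-operator builder dispatches to the scalar Check*Operands helpers
// declared above; the vector-operand checkers follow below. The function
// name and the reduced switch are invented for illustration.
#if 0
static QualType checkBinOpOperands(Sema &S, BinaryOperatorKind Opc,
                                   ExprResult &LHS, ExprResult &RHS,
                                   SourceLocation OpLoc) {
  switch (Opc) {
  case BO_Mul:
    return S.CheckMultiplyDivideOperands(LHS, RHS, OpLoc,
                                         /*IsCompAssign=*/false,
                                         /*IsDivide=*/false);
  case BO_Add:
    return S.CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add);
  case BO_Shl:
    return S.CheckShiftOperands(LHS, RHS, OpLoc, BO_Shl);
  default:
    // Fall back to the generic "invalid operands" diagnostic.
    return S.InvalidOperands(OpLoc, LHS, RHS);
  }
}
#endif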
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

bool isLaxVectorConversion(QualType srcType, QualType destType);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).

/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible_With_Added_Qualification - The two types are
  /// reference-compatible with added qualification, meaning that
  /// they are reference-compatible and the qualifiers on T1 (cv1)
  /// are greater than the qualifiers on T2 (cv2).
  Ref_Compatible_With_Added_Qualification,
  /// Ref_Compatible - The two types are reference-compatible and
  /// have equivalent qualifiers (cv1 == cv2).
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };
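// Illustrative aside (not part of the original header): the vector-cast rule
// described above -- casts between a vector and a scalar type of the same
// total size are accepted, everything else is rejected. The call site is
// invented.
#if 0
static bool checkVectorBitcast(Sema &S, SourceRange R, QualType VecTy,
                               QualType ScalarTy) {
  CastKind Kind = CK_BitCast;
  // CheckVectorCast returns true when the cast is invalid (e.g. the sizes
  // of the two types differ) and may refine Kind on success.
  return S.CheckVectorCast(R, VecTy, ScalarTy, Kind);
}
#endif

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.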
ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. 
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) = 0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \name Code completion
//@{

/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation.
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, 
ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); // HLSL Change Starts - checking array subscript access to vector or matrix member void CheckHLSLArrayAccess(const Expr *expr); // HLSL Change ends void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
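// Illustrative aside (not part of the original header): a hypothetical helper
// (imagined as living inside Sema, shown free-standing here) sketching how
// the FormatStringInfo record declared below is filled in from a FormatAttr
// and then consulted. Only getFormatStringInfo and the struct fields are from
// the surrounding declarations.
#if 0
static void checkPrintfLikeCall(Sema &S, const FormatAttr *Format,
                                bool IsCXXMember,
                                ArrayRef<const Expr *> Args) {
  Sema::FormatStringInfo FSI;
  if (!S.getFormatStringInfo(Format, IsCXXMember, &FSI))
    return; // the attribute does not describe a checkable call
  // FSI.FormatIdx is the index of the format-string argument;
  // FSI.HasVAListArg records whether the callee takes a va_list rather than
  // variadic data arguments starting at FSI.FirstDataArg.
  const Expr *FormatArg = Args[FSI.FormatIdx];
  (void)FormatArg;
}
#endif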
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinCpuSupports(CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E);

/// \brief Check whether the receiver of a message is a mutable ObjC
/// container which attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const Expr * const *ExprArgs);

/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth,
                      TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
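// Illustrative aside (not part of the original header): registering a magic
// type-tag value via RegisterTypeTagForDatatype, declared above. The
// identifier name "my_kind" and the tag value are invented for illustration.
#if 0
static void registerExampleTypeTag(Sema &S, ASTContext &Ctx) {
  const IdentifierInfo *Kind = &Ctx.Idents.get("my_kind");
  // Arguments tagged with magic value 42 must have type 'int'; an exact type
  // match is required (LayoutCompatible=false) and null is not mandated.
  S.RegisterTypeTagForDatatype(Kind, /*MagicValue=*/42, Ctx.IntTy,
                               /*LayoutCompatible=*/false,
                               /*MustBeNull=*/false);
}
#endif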
AvailabilityResult getCurContextAvailability() const;

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// \brief To be used for checking whether the number of arguments being
/// passed to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context,
                                              Sema::TemplateDeductionResult TDK,
                                              sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
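// Illustrative aside (not part of the original header): the arity check that
// Sema::TooManyArguments implements above, exercised standalone. A minimal
// sketch; the test values are invented.
#if 0
#include <cassert>
int main() {
  // Exactly as many arguments as parameters: fine.
  assert(!clang::Sema::TooManyArguments(/*NumParams=*/3, /*NumArgs=*/3));
  // One argument too many.
  assert(clang::Sema::TooManyArguments(3, 4));
  // During code completion just after a comma (PartialOverloading), the
  // trailing comma implies one more argument to come, so three written
  // arguments already overflow a three-parameter candidate.
  assert(clang::Sema::TooManyArguments(3, 3, /*PartialOverloading=*/true));
}
#endif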
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H

#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>

// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependent
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends

namespace llvm {
  class APSInt;
  template <typename ValueT> struct DenseMapInfo;
  template <typename ValueT, typename ValueInfoT> class DenseSet;
  class SmallBitVector;
  class InlineAsmIdentifierInfo;
}

namespace clang {
  class ADLResult;
  class ASTConsumer;
  class ASTContext;
  class ASTMutationListener;
  class ASTReader;
  class ASTWriter;
  class ArrayType;
  class AttributeList;
  class BlockDecl;
  class CapturedDecl;
  class CXXBasePath;
  class CXXBasePaths;
  class CXXBindTemporaryExpr;
  typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
  class CXXConstructorDecl;
  class CXXConversionDecl;
  class CXXDeleteExpr;
  class CXXDestructorDecl;
  class CXXFieldCollector;
  class CXXMemberCallExpr;
  class CXXMethodDecl;
  class CXXScopeSpec;
  class CXXTemporary;
  class CXXTryStmt;
  class CallExpr;
  class ClassTemplateDecl;
  class ClassTemplatePartialSpecializationDecl;
  class ClassTemplateSpecializationDecl;
  class VarTemplatePartialSpecializationDecl;
  class CodeCompleteConsumer;
  class CodeCompletionAllocator;
  class CodeCompletionTUInfo;
  class CodeCompletionResult;
  class Decl;
  class DeclAccessPair;
  class DeclContext;
  class DeclRefExpr;
  class DeclaratorDecl;
  class DeducedTemplateArgument;
  class DependentDiagnostic;
  class DesignatedInitExpr;
  class Designation;
  class EnableIfAttr;
  class EnumConstantDecl;
  class Expr;
  class ExtVectorType;
  class ExternalSemaSource;
  class FormatAttr;
  class FriendDecl;
  class FunctionDecl;
  class FunctionProtoType;
  class FunctionTemplateDecl;
  class ImplicitConversionSequence;
  class InitListExpr;
  class InitializationKind;
  class InitializationSequence;
class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. 
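// (Note, not part of the original header: the statements that follow complete
// FileNullabilityMap::operator[] -- they overwrite the one-element cache with
// this file's entry from the main map and return a reference to the cache
// slot, so repeated lookups for the same FileID stay cheap.)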
Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; // HLSL Change Begin // The HLSL rewriter doesn't define a default matrix pack, // so we must preserve the lack of annotations to avoid changing semantics. bool HasDefaultMatrixPack = false; // Uses of #pragma pack_matrix change the default pack. bool DefaultMatrixPackRowMajor = false; // HLSL Change End. enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. 
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;

/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;

/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;

template<typename ValueType>
struct PragmaStack {
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    Slot(llvm::StringRef StackSlotLabel,
         ValueType Value,
         SourceLocation PragmaLocation)
      : StackSlotLabel(StackSlotLabel), Value(Value),
        PragmaLocation(PragmaLocation) {}
  };
  void Act(SourceLocation PragmaLocation,
           PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel,
           ValueType Value);
  explicit PragmaStack(const ValueType &Value)
    : CurrentValue(Value) {}
  SmallVector<Slot, 2> Stack;
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;

/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;

/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;

/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"

/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;

/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;

/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;

/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;

/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;

/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
  ExtVectorDeclsType;
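// Illustrative aside (not part of the original header): driving one of the
// PragmaStack members above with the PragmaMsStackAction values documented
// earlier. 'Loc' and 'Seg' are assumed to come from a pragma handler; the
// label "my_id" is invented.
#if 0
static void handleDataSegPragma(Sema &S, SourceLocation Loc,
                                StringLiteral *Seg) {
  // #pragma data_seg(push, my_id, ".mydata")
  S.DataSegStack.Act(Loc, Sema::PSK_Push_Set, "my_id", Seg);
  // ... declarations placed in .mydata ...
  // #pragma data_seg(pop, my_id)
  S.DataSegStack.Act(Loc, Sema::PSK_Pop, "my_id", nullptr);
}
#endif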
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;

/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;

typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;

/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;

/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
    UnusedLocalTypedefNameCandidates;

/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

typedef LazyVector<VarDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
  TentativeDefinitionsType;

/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;

typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
  UnusedFileScopedDeclsType;

/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;

typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
  DelegatingCtorDeclsType;

/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;

/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
  DelayedExceptionSpecChecks;

/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
  DelayedDefaultedMemberExceptionSpecs;

typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
    LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
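// Illustrative aside (not part of the original header): how a parser might
// register its late-template-parsing callbacks with Sema via
// SetLateTemplateParser, declared just below. The Parser entry point named
// here is a stand-in, not necessarily the exact clang API.
#if 0
static void lateTemplateParserCB(void *P, LateParsedTemplate &LPT) {
  // Replay the cached tokens and finish parsing the templated function.
  static_cast<Parser *>(P)->ParseLateTemplatedFuncDef(LPT);
}

static void wireUpLateParsing(Sema &S, Parser *P) {
  S.SetLateTemplateParser(lateTemplateParserCB, /*LTPCleanup=*/nullptr, P);
}
#endif

/// \brief Callback to the parser to parse templated functions when needed.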
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
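// Illustrative aside (not part of the original header) on ContextRAII,
// declared above; the SynthesizedFunctionScope class that follows builds on
// the same pattern. The helper name and call site are invented.
#if 0
static void addImplicitMembersIn(Sema &S, CXXRecordDecl *Class) {
  // Temporarily make 'Class' the current declaration context; delayed
  // diagnostics are suspended for the duration.
  Sema::ContextRAII SavedContext(S, Class);
  // ... declarations created here land inside 'Class' ...
} // the destructor restores the previous context and diagnostic state
#endif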
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. 
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// \brief id<NSCopying> type.
QualType QIDNSCopying;

/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;

/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
  /// \brief The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// \brief The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// \brief The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// \brief The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// \brief The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};
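// Illustrative mapping (not part of the original header) of source constructs
// to the evaluation contexts above, derived from the enumerator comments:
//
//   sizeof(expr)                -> Unevaluated
//   __asm { mov eax, SIZE x }   -> UnevaluatedAbstract (MS inline assembly)
//   case 1 + 2:                 -> ConstantEvaluated (case label value)
//   f(x);                       -> PotentiallyEvaluated
//   void g(int n = h());        -> PotentiallyEvaluatedIfUsed for h()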
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// \brief The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// \brief Whether the enclosing context needed a cleanup.
  bool ParentNeedsCleanups;

  /// \brief Whether we are in a decltype expression.
  bool IsDecltype;

  /// \brief The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// \brief The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

  /// \brief The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// \brief The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// \brief The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;

  /// \brief If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// \brief If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    bool ParentNeedsCleanups,
                                    Decl *ManglingContextDecl,
                                    bool IsDecltype)
      : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
        IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
        NumTypos(0), ManglingContextDecl(ManglingContextDecl),
        MangleNumbering() { }

  /// \brief Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == Unevaluated || Context == UnevaluatedAbstract;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;
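// Illustrative note (not part of the original header): the
// SpecialMemberOverloadResult class above packs the resolved method and the
// result kind into one pointer-sized value via llvm::PointerIntPair, which
// stores the Kind in the low bits left free by the pointer's alignment.
// A hypothetical consumer might read it back as:
//
//   SpecialMemberOverloadResult *R = /* looked up in SpecialMemberCache */;
//   if (R->getKind() == SpecialMemberOverloadResult::Success)
//     CXXMethodDecl *MD = R->getMethod(); // the selected special member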
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

void ReadMethodPool(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema& S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
  ~FPContractStateRAII() {
    S.FPFeatures.fp_contract = OldFPContractState;
  }

private:
  Sema& S;
  bool OldFPContractState : 1;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
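// Illustrative sketch (not part of the original header): how a parser-side
// caller might use FPContractStateRAII so that a `#pragma STDC FP_CONTRACT`
// seen inside a compound statement is undone on exit. The function shown is
// hypothetical.
//
//   void handleCompoundStatement(Sema &S) {
//     Sema::FPContractStateRAII SaveFP(S); // remember fp_contract on entry
//     // ... act on statements; pragmas may toggle S.FPFeatures ...
//   } // destructor restores the saved FP_CONTRACT state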
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
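// Illustrative sketch (not part of the original header): emitting a
// diagnostic through the builder above. Streamed arguments bind to the
// %0/%1 placeholders of the diagnostic text, and the diagnostic fires when
// the temporary SemaDiagnosticBuilder is destroyed at the end of the full
// expression. The diagnostic ID and streamed values are placeholders.
//
//   S.Diag(D->getLocation(), diag::err_some_diagnostic)
//       << D->getDeclName() << SomeType;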
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

void ActOnEndOfTranslationUnit();

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD,
                             CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     const BlockExpr *blkExpr = nullptr);

sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const {
  if (FunctionScopes.empty())
    return nullptr;

  for (int e = FunctionScopes.size()-1; e >= 0; --e) {
    if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
      continue;
    return FunctionScopes[e];
  }
  return nullptr;
}

template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead = true) {
  if (!isUnevaluatedContext())
    getCurFunction()->recordUseOfWeak(E, IsRead);
}

void PushCompoundScope();
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();

/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

unsigned deduceWeakPropertyFromType(QualType T) {
  if ((getLangOpts().getGC() != LangOptions::NonGC &&
       T.isObjCGCWeak()) ||
      (getLangOpts().ObjCAutoRefCount &&
       T.getObjCLifetime() == Qualifiers::OCL_Weak))
    return ObjCDeclSpec::DQ_PR_weak;
  return 0;
}
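// Illustrative note (not part of the original header): FunctionScopes is a
// stack, so getCurFunction() returns the innermost scope (which may belong
// to a block), while getEnclosingFunction() walks outward past
// BlockScopeInfo entries. For a hypothetical nesting
//
//   void f() {     // FunctionScopeInfo for f
//     ^{ ... };    // BlockScopeInfo for the block literal
//   }
//
// getCurFunction() inside the block yields the block's scope info, whereas
// getEnclosingFunction() yields the scope info for f.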
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
                           MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
                                               TypeSourceInfo *ReturnTypeInfo);

/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc,
    bool *MissingExceptionSpecification = nullptr,
    bool *MissingEmptyExceptionSpecification = nullptr,
    bool AllowNoexceptAllMatchWithNoSpec = false,
    bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
    const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
    const FunctionProtoType *Superset, SourceLocation SuperLoc,
    const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(
    const PartialDiagnostic &NoteID,
    const FunctionProtoType *Target, SourceLocation TargetLoc,
    const FunctionProtoType *Source, SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
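// Illustrative sketch (not part of the original header): rebuilding a
// function type after its pieces have been instantiated. The variables
// shown (Ret, Params, Loc, Name) are hypothetical.
//
//   FunctionProtoType::ExtProtoInfo EPI; // defaults: no exception spec, etc.
//   QualType FnTy = S.BuildFunctionType(Ret, Params, Loc, Name, EPI);
//   // On error FnTy.isNull() is true; otherwise the unqualified type is a
//   // FunctionProtoType, possibly with adjusted parameter types.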
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  bool Suppressed;

  TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    if (Suppressed)
      return;
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser &Diagnoser);

VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;

Module *CachedFakeTopLevelModule;

public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);

/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);

bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = ParsedType(),
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                             SourceLocation IILoc,
                             Scope *S,
                             CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool AllowClassTemplates = false);

/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
                                          SourceLocation NameLoc);
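// Illustrative sketch (not part of the original header): the variadic
// RequireCompleteType overload binds any extra diagnostic arguments into a
// BoundTypeDiagnoser, which streams them (and then the type itself) into the
// diagnostic only if it actually fires. The diagnostic ID and range below
// are placeholders.
//
//   if (S.RequireCompleteType(Loc, FieldTy,
//                             diag::err_incomplete_type, SomeRange))
//     return true; // the type was incomplete and has been diagnosed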
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate
};

class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *) : Kind(NC_Keyword) { }

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate);
    return Template;
  }

  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};
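// Illustrative sketch (not part of the original header): how a parser-side
// caller might dispatch on a classification result. The surrounding
// variables are hypothetical.
//
//   Sema::NameClassification C =
//       S.ClassifyName(CurScope, SS, Name, NameLoc, NextTok,
//                      /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case Sema::NC_Type:       /* parse a declaration with C.getType() */ break;
//   case Sema::NC_Expression: /* continue with C.getExpression() */     break;
//   case Sema::NC_Error:      /* the name was invalid; recover */       break;
//   default: /* keywords, templates, nested-name-specifiers, ... */     break;
//   }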
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
             SourceLocation NameLoc, const Token &NextToken,
             bool IsAddressOfOperand,
             std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name,
                                  SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                          SourceLocation FallbackLoc,
                          SourceLocation ConstQualLoc = SourceLocation(),
                          SourceLocation VolatileQualLoc = SourceLocation(),
                          SourceLocation RestrictQualLoc = SourceLocation(),
                          SourceLocation AtomicQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
  ShadowMergeState_Disallowed, // shadowing is not allowed
  ShadowMergeState_Possible,   // shadowing is possible (but may not occur)
  ShadowMergeState_Effective   // the declaration should shadow a prior one
};
// HLSL Change Ends

NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state

// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous,
                              ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);

// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
                              FunctionDecl *NewFD, LookupResult &Previous,
                              bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC,
                            hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
                               SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
                                       SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                          bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
                                    bool TypeMayContainAuto = true);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
                              ParmVarDecl * const *End);

/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
                                            ParmVarDecl * const *End,
                                            QualType ReturnTy,
                                            NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
                            AttributeList *AttrList,
                            SourceLocation SemiLoc);

/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                             ModuleIdPath Path);

/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument
};

/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation = false);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

struct SkipBodyInfo {
  SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
  bool ShouldSkip;
  NamedDecl *Previous;
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS,
               IdentifierInfo *Name, SourceLocation NameLoc,
               AttributeList *Attr, AccessSpecifier AS,
               SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists,
               bool &OwnedDecl, bool &IsDependent,
               SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS,
                              IdentifierInfo *Name, SourceLocation NameLoc,
                              AttributeList *Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D,
                       Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart,
                                 Declarator &D, Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 AttributeList *MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo,
                          RecordDecl *Record, SourceLocation Loc,
                          bool Mutable, Expr *BitfieldWidth,
                          InClassInitStyle InitStyle,
                          SourceLocation TSSL,
                          AccessSpecifier AS, NamedDecl *PrevDecl,
                          Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                Declarator &D, Expr *BitfieldWidth,
                tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields,
                 SourceLocation LBrac, SourceLocation RBrac,
                 AttributeList *AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

typedef void *SkippedDefinitionContext;

/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceLocation RBraceLoc);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        AttributeList *Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
                   SourceLocation RBraceLoc, Decl *EnumDecl,
                   ArrayRef<Decl *> Elements,
                   Scope *S, AttributeList *Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                        IdentifierInfo *Platform,
                                        VersionTuple Introduced,
                                        VersionTuple Deprecated,
                                        VersionTuple Obsoleted,
                                        bool IsUnavailable,
                                        StringRef Message,
                                        bool Override,
                                        unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                       TypeVisibilityAttr::VisibilityType Vis,
                                            unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                    VisibilityAttr::VisibilityType Vis,
                                    unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                       unsigned AttrSpellingListIndex,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                        IdentifierInfo *Ident,
                                        unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                              unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                        unsigned AttrSpellingListIndex);

/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// \brief Don't merge availability attributes at all.
  AMK_None,
  /// \brief Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// \brief Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override
};

void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous,
                  ShadowMergeState &MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld,
                       ShadowMergeState &MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,

  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,

  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};

OverloadKind CheckOverload(Scope *S,
                           FunctionDecl *New,
                           const LookupResult &OldDecls,
                           NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);

ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind,
                                  CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType,
                                           Expr *Value,
                                           bool AllowNRVO = true);

bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// \brief Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// \brief Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);
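// Illustrative sketch (not part of the original header): a caller-side
// diagnoser derived from ICEConvertDiagnoser, as passed to
// PerformContextualImplicitConversion. The class name and diagnostic ID are
// placeholders; a real caller overrides every remaining pure virtual hook.
//
//   struct MyIntConvertDiagnoser : Sema::ICEConvertDiagnoser {
//     MyIntConvertDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/true,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_some_requires_integer) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     //     diagnoseAmbiguous, noteAmbiguous, diagnoseConversion likewise ...
//   };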
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());

// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
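// Illustrative sketch, not part of the original header: the candidate-note
// helpers above produce the familiar notes attached to an ambiguity such as:
//
//   void f(float);
//   void f(double);
//   void g() { f(1); }
//   // error: call to 'f' is ambiguous
//   // note: candidate function 'void f(float)'
//   // note: candidate function 'void f(double)'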
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false);

// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr);
FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
  BEF_begin,
  BEF_end
};

ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection = true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc);
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. 
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};

/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// \brief The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// \brief The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists.
  ForRedeclaration
};

/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// \brief The lookup resulted in an error.
  LOLR_Error,
  /// \brief The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// \brief The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
  TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};

/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC);

// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
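// Illustrative sketch, not part of the original header: CorrectTypoDelayed
// (declared below) pairs one TypoDiagnosticGenerator with one
// TypoRecoveryCallback; the lambda bodies here are hypothetical placeholders
// that only demonstrate the two typedef signatures above:
//
//   TypoDiagnosticGenerator TDG = [](const TypoCorrection &TC) {
//     // emit the "did you mean ...?" diagnostic for this correction
//   };
//   TypoRecoveryCallback TRC = [](Sema &S, TypoExpr *TE,
//                                 TypoCorrection TC) -> ExprResult {
//     return ExprError();   // or rebuild the expression using TC
//   };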
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' types match and returns
/// true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance);

public:
/// \brief Returns instance or factory methods in the global method pool for
/// the given selector. If no such method or only one method is found, returns
/// false; otherwise, returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass);

private:
/// \brief Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance);

/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(nullptr) { }
  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// \brief A RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S) : S(S) {
    S.ActOnStartOfCompoundStmt();
  }
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};
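// Illustrative sketch, not part of the original header: a caller brackets
// parsing of a compound statement with this RAII object so that the start and
// finish callbacks always pair up; 'Actions' is a hypothetical name for the
// parser's Sema instance:
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(Actions);
//     // ... parse the statements between '{' and '}' ...
//   }   // destructor runs ActOnFinishOfCompoundStmt()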
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope);

StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body);

ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope 
*S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
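// Illustrative sketch, not part of the original header: CanUseDecl and
// DiagnoseUseOfDecl below drive availability diagnostics such as:
//
//   [[deprecated("use g() instead")]] void f();
//   void h() { f(); }   // warning: 'f' is deprecated: use g() instead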
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit,
  TryCapture_ExplicitByVal,
  TryCapture_ExplicitByRef
};
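// Illustrative sketch, not part of the original header: the capture kinds
// above correspond directly to how a variable appears in a capture list:
//
//   void f() {
//     int n = 0;
//     auto a = [n]  { return n; };   // TryCapture_ExplicitByVal
//     auto b = [&n] { return n; };   // TryCapture_ExplicitByRef
//     auto c = [=]  { return n; };   // TryCapture_Implicit (by value)
//   }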
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt);

/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation());

/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false);

/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr);

/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads);

/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD);

// Primary Expressions.
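// Illustrative sketch, not part of the original header: the callbacks below
// turn parsed primary expressions into AST nodes, e.g. for
//
//   int x = 42;
//   int y = (x);   // ActOnIdExpression resolves 'x' to a DeclRefExpr,
//                  // then ActOnParenExpr wraps it in a ParenExpr.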
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult 
ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); // HLSL Change Begins bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc, UnaryExprOrTypeTrait ExprKind); // HLSL Change Ends bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);
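// Illustrative sketch, not part of the original header: the GNU builtins
// handled above, as they appear in C source:
//
//   struct S { int a; int b[4]; };
//   unsigned long off = __builtin_offsetof(struct S, b[2]);  // offset of b[2]
//   int ten = __builtin_choose_expr(1, 10, 20);  // constant condition picks
//                                                // the first expression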
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); // HLSL Change Starts //===---------------------------- HLSL Features -------------------------===// /// cbuffer/tbuffer llvm::SmallVector<Decl*, 1> HLSLBuffers; Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc, IdentifierInfo *Ident, SourceLocation IdentLoc, std::vector<hlsl::UnusualAnnotation *>& BufferAttributes, SourceLocation LBrace); void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace); Decl* getActiveHLSLBuffer() const; void ActOnStartHLSLBufferView(); bool IsOnHLSLBufferView(); Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc, DeclGroupPtrTy &dcl, bool iscbuf); // HLSL Change Ends //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
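/// (Illustrative addition, not part of the original header.) E.g. with /// Element = 'int' this yields the type deduced for 'il' in: /// \code /// std::initializer_list<int> il = {1, 2, 3}; /// \endcode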
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. 
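/// (Illustrative addition, not part of the original header.) Each special /// member invoked by an implicit definition is fed to CalledDecl(), and the /// computed specification only ever weakens, e.g.: /// \code /// struct A { A(const A&) noexcept; }; /// struct B { B(const B&); }; // potentially throwing /// struct C : A { B b; }; // implicit C(const C&) is noexcept(false) /// \endcode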
class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. 
/// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression.
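/// (Illustrative addition, not part of the original header.) /// \code /// struct S { ~S(); }; /// void f() { S(); } // the temporary is wrapped in a CXXBindTemporaryExpr /// \endcode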
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'.
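/// (Illustrative addition, not part of the original header.) E.g. 'this' /// may legally appear in a trailing-return-type, where this RAII object /// gives it the appropriately qualified type: /// \code /// struct X { int m; auto f() const -> decltype(this->m); }; // 'this' is 'const X *' /// \endcode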
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true if an error occurred, false otherwise. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
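/// (Illustrative addition, not part of the original header.) Covers forms /// such as: /// \code /// p = new T(1, 2); // direct-initialization /// p = new (buf) T; // placement arguments /// p = new T[n]; // array form; the size lives in the declarator /// \endcode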
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions.
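/// (Illustrative addition, not part of the original header.) E.g.: /// \code /// bool b = __is_lvalue_expr(*p); /// \endcode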
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
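/// (Illustrative addition, not part of the original header.) The MS /// extension handled here: /// \code /// struct D : B { void f() { __super::f(); } }; /// \endcode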
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
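/// (Illustrative addition, not part of the original header.) E.g. this /// handles the 'vector<int>::' part of: /// \code /// std::vector<int>::iterator it; /// \endcode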
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
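/// (Illustrative addition, not part of the original header.) For an /// init-capture such as: /// \code /// auto l = [x = makeX()] { return x; }; /// \endcode /// the type of 'x' is deduced from its initializer, with an /// lvalue-to-rvalue conversion applied when capturing by value.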
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
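/// (Illustrative addition, not part of the original header.) E.g.: /// \code /// NSNumber *n = @42; // boxed numeric literal /// \endcode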
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const 
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
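/// (Illustrative addition, not part of the original header.) E.g.: /// \code /// struct Base { virtual void f(); }; /// Base *make() { return new Base(); } // marks Base's vtable used /// \endcode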
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, bool NextIsLess = false); // HLSL Change - additional special case flag TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool NextIsLess = false); // HLSL Change - additional special case flag bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg 
TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise.
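/// As a hedged sketch of the defaulted-argument extension described above
/// (the template and values are invented for illustration):
/// \code
/// template<typename T, unsigned N = 4> struct Buffer;
/// Buffer<float> B; // checking <float> extends the list to <float, 4>
/// \endcode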
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
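/// For illustration only, a hypothetical call and the records it would
/// produce as (OriginalParamType, ArgIdx, OriginalArgType) triples:
/// \code
/// template<typename T> void f(T, T*);
/// int i;
/// f(i, &i); // yields (T, 0, int) and (T*, 1, int*)
/// \endcode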
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
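/// A hedged sketch of initializers and the outcome DeduceAutoType would be
/// expected to report for them:
/// \code
/// auto a = 42;       // DAR_Succeeded; Result is 'int'
/// auto b = {1, 2.0}; // fails: the braced list has no common element type
/// \endcode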
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup.
A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief The stack of call expressions undergoing template instantiation. /// /// The top of this stack is used by a fixit instantiating unresolved /// function calls to fix the AST to match the textual change it prints. SmallVector<CallExpr *, 8> CallsUndergoingInstantiation; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() returns true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, or a member thereof.
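/// A typical call-site sketch (the surrounding names are assumed for
/// illustration):
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return true; // depth limit exceeded; an error was already emitted
/// // ... perform the instantiation; the entry is popped on scope exit ...
/// \endcode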
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc.
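/// For example, in the (illustrative) template below, Local::g can only be
/// instantiated inside f's own scope:
/// \code
/// template<typename T> void f() {
///   struct Local { void g() { T t; (void)t; } };
///   Local().g(); // queued on PendingLocalImplicitInstantiations
/// }
/// \endcode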
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
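/// A minimal call-site sketch for SubstExprs, assuming hypothetical 'Args'
/// and 'TemplateArgs' variables at the caller:
/// \code
/// SmallVector<Expr *, 8> Outputs;
/// if (SubstExprs(Args.data(), Args.size(), /*IsCall=*/true,
///                TemplateArgs, Outputs))
///   return ExprError();
/// \endcode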
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty.
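/// For instance, processing the (illustrative) property below creates the
/// implicit -name getter and -setName: setter declarations:
/// \code
/// @interface Widget
/// @property (nonatomic, copy) NSString *name;
/// @end
/// \endcode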
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
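  // Example (illustrative sketch, not Sema API): a "#pragma options
  // align=mac68k" handler would reach the entry point below roughly as
  // follows, with `Actions` (a Sema &) and `PragmaLoc` assumed:
  //
  //   Actions.ActOnPragmaOptionsAlign(Sema::POAK_Mac68k, PragmaLoc);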
  void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc);

  enum PragmaPackKind {
    PPK_Default, // #pragma pack([n])
    PPK_Show,    // #pragma pack(show), only supported by MSVC.
    PPK_Push,    // #pragma pack(push, [identifier], [n])
    PPK_Pop      // #pragma pack(pop, [identifier], [n])
  };

  enum PragmaMSStructKind {
    PMSST_OFF, // #pragma ms_struct off
    PMSST_ON   // #pragma ms_struct on
  };

  enum PragmaMSCommentKind {
    PCK_Unknown,
    PCK_Linker,   // #pragma comment(linker, ...)
    PCK_Lib,      // #pragma comment(lib, ...)
    PCK_Compiler, // #pragma comment(compiler, ...)
    PCK_ExeStr,   // #pragma comment(exestr, ...)
    PCK_User      // #pragma comment(user, ...)
  };

  /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
  void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc);

  /// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
  void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);

  /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
  void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

  /// ActOnPragmaMSComment - Called on well formed
  /// \#pragma comment(kind, "arg").
  void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

  /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
  /// pointers_to_members(representation method[, general purpose
  /// representation]).
  void ActOnPragmaMSPointersToMembers(LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc);

  /// \brief Called on well formed \#pragma vtordisp().
  void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value);

  enum PragmaSectionKind {
    PSK_DataSeg,
    PSK_BSSSeg,
    PSK_ConstSeg,
    PSK_CodeSeg,
  };

  bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl);
  bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation);

  /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
  void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName);

  /// \brief Called on well formed \#pragma section().
  void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName);

  /// \brief Called on well-formed \#pragma init_seg().
  void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName);

  /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
  void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc);

  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc);
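  // Example (illustrative sketch, not Sema API): "#pragma pack(push, r1, 8)"
  // maps onto the pack callback above; `Actions`, `Name` (the identifier
  // "r1"), `AlignExpr` (an integer literal 8) and the locations are assumed:
  //
  //   Actions.ActOnPragmaPack(Sema::PPK_Push, Name, AlignExpr, PragmaLoc,
  //                           LParenLoc, RParenLoc);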
  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT
  void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  /// \brief Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// \brief Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; }

  /// \brief Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// \brief Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex);
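  // Example (illustrative sketch): the optimize pragma state above is keyed
  // off the saved location; an invalid location means the pragma is "on".
  // A function definition seen while it is "off" can be marked optnone
  // (assuming `S` is the Sema instance and `FD` the definition):
  //
  //   if (S.getOptimizeOffPragmaLocation().isValid())
  //     S.AddRangeBasedOptnone(FD);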
  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex);

  // OpenMP directives and clauses.
private:
  void *VarDataSharingAttributesStack;
  /// \brief Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();
  ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind);

public:
  /// \brief Checks if the specified variable is used in one of the private
  /// clauses in OpenMP constructs.
  bool IsOpenMPCapturedVar(VarDecl *VD);

  /// \brief Check if the specified variable is used in a private clause in
  /// OpenMP constructs.
  /// \param Level Relative level of the nested OpenMP construct for which
  /// the check is performed.
  bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op);

  /// \brief Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc);
  /// \brief Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// \brief End analysis of clauses.
  void EndOpenMPClause();
  /// \brief Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// \brief Check if the current region is an OpenMP loop region and if it is,
  /// mark the loop control variable, used in \p Init for loop initialization,
  /// as private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  /// \brief Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id);
  /// \brief Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList);
  /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList);
  /// \brief Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// \brief End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
  StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
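  // Example (illustrative sketch of the expected call sequence, not verbatim
  // parser code): a '#pragma omp parallel' is bracketed by the data-sharing
  // and region callbacks above; `Actions`, `ParseStatement` and the
  // locations are assumed:
  //
  //   Actions.StartOpenMPDSABlock(OMPD_parallel, DirName, CurScope, Loc);
  //   Actions.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
  //   StmtResult AStmt = ParseStatement();
  //   AStmt = Actions.ActOnOpenMPRegionEnd(AStmt, Clauses);
  //   StmtResult Dir = Actions.ActOnOpenMPExecutableDirective(
  //       OMPD_parallel, DirName, /*CancelRegion=*/OMPD_unknown, Clauses,
  //       AStmt.get(), StartLoc, EndLoc);
  //   Actions.EndOpenMPDSABlock(Dir.get());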
StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. 
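  // Example (illustrative sketch): the loop directives above thread an
  // out-parameter map that collects variables whose data-sharing attributes
  // are only implicit in the loop; a typical call (names assumed) is:
  //
  //   llvm::DenseMap<VarDecl *, Expr *> VarsWithImplicitDSA;
  //   StmtResult Res = Actions.ActOnOpenMPForDirective(
  //       Clauses, AStmt, StartLoc, EndLoc, VarsWithImplicitDSA);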
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. 
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. 
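  // Example (illustrative sketch): a parsed clause list such as
  // "private(a, b) num_threads(4)" becomes OMPClause nodes via the
  // callbacks above (argument names assumed):
  //
  //   OMPClause *Priv = Actions.ActOnOpenMPPrivateClause(
  //       Vars, StartLoc, LParenLoc, EndLoc);
  //   OMPClause *NT = Actions.ActOnOpenMPNumThreadsClause(
  //       NumThreadsExpr, StartLoc, LParenLoc, EndLoc);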
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'depend' clause.
  OMPClause *ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);

  /// \brief The kind of conversion being performed.
  enum CheckedConversionKind {
    /// \brief An implicit conversion.
    CCK_ImplicitConversion,
    /// \brief A C-style cast.
    CCK_CStyleCast,
    /// \brief A functional-style cast.
    CCK_FunctionalCast,
    /// \brief A cast other than a C-style cast.
    CCK_OtherCast
  };

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collect argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's by
    /// far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For example,
    /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType);

  /// Check assignment constraints and prepare for a conversion of the
  /// RHS to the LHS type.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind);

  // CheckSingleAssignmentConstraints - Currently used by
  // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
  // this routine performs the default function/array conversions.
  AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false);

  /// \brief If the lhs type is a transparent union, check whether we
  /// can initialize the transparent union with the given expression.
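  // Example (illustrative sketch): the constraint check and the diagnostic
  // helper above are normally paired; `LHSType`, `RHS` (an ExprResult) and
  // `Loc` are assumed, and AA_Assigning names the AssignmentAction:
  //
  //   AssignConvertType ConvTy =
  //       CheckSingleAssignmentConstraints(LHSType, RHS);
  //   DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHS.get()->getType(),
  //                            RHS.get(), AA_Assigning);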
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
    Ref_Compatible
  };

  ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path);

  /// \brief Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// \brief Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged };

  /// \brief Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds.
  ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK);

  /// \brief Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
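  // Example (illustrative sketch): a cast run through the ARC checker above
  // either passes or produces an "unbridged" cast that is stripped or
  // diagnosed later (operands assumed, called from within Sema):
  //
  //   if (CheckObjCARCConversion(CastRange, CastTy, Op, CCK_CStyleCast) ==
  //       ACR_unbridged)
  //     diagnoseARCUnbridgedCast(Op);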
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

  bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object on
  /// its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached to
  /// the member declaration.
  bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose);

  /// \name Code completion
  //@{

  /// \brief Describes the context in which code completion occurs.
  enum ParserCompletionContext {
    /// \brief Code completion occurs at top-level or namespace context.
    PCC_Namespace,
    /// \brief Code completion occurs within a class, struct, or union.
    PCC_Class,
    /// \brief Code completion occurs within an Objective-C interface, protocol,
    /// or category.
    PCC_ObjCInterface,
    /// \brief Code completion occurs within an Objective-C implementation or
    /// category implementation.
    PCC_ObjCImplementation,
    /// \brief Code completion occurs within the list of instance variables
    /// in an Objective-C interface, protocol, category, or implementation.
    PCC_ObjCInstanceVariableList,
    /// \brief Code completion occurs following one or more template
    /// headers.
    PCC_Template,
    /// \brief Code completion occurs following one or more template
    /// headers within a class.
    PCC_MemberTemplate,
    /// \brief Code completion occurs within an expression.
    PCC_Expression,
    /// \brief Code completion occurs within a statement, which may
    /// also be an expression or a declaration.
    PCC_Statement,
    /// \brief Code completion occurs at the beginning of the
    /// initialization statement (or expression) in a for loop.
    PCC_ForInit,
    /// \brief Code completion occurs within the condition of an if,
    /// while, switch, or for statement.
    PCC_Condition,
    /// \brief Code completion occurs within the body of a function on a
    /// recovery path, where we do not have a specific handle on our position
    /// in the grammar.
    PCC_RecoveryInFunction,
    /// \brief Code completion occurs where only a type is permitted.
    PCC_Type,
    /// \brief Code completion occurs in a parenthesized expression, which
    /// might also be a type cast.
    PCC_ParenthesizedExpression,
    /// \brief Code completion occurs within a sequence of declaration
    /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); // HLSL Change Starts - checking array subscript access to vector or matrix member void CheckHLSLArrayAccess(const Expr *expr); // HLSL Change ends void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
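  // Example (illustrative sketch): CheckBuiltinFunctionCall above fans out
  // to the per-target checkers, conceptually (simplified):
  //
  //   switch (Context.getTargetInfo().getTriple().getArch()) {
  //   case llvm::Triple::arm:
  //     if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
  //       return ExprError();
  //     break;
  //   case llvm::Triple::x86:
  //     if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
  //       return ExprError();
  //     break;
  //   default:
  //     break;
  //   }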
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinCpuSupports(CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; // HLSL Change Starts bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter); bool DiagnoseHLSLLookup(const LookupResult &R); void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl); // HLSL Change Ends /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ?
OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// \brief To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we count it as an extra argument. return NumArgs > NumParams; } }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
GB_binop__min_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_fp64 // A.*B function (eWiseMult): GB_AemultB__min_fp64 // A*D function (colscale): GB_AxD__min_fp64 // D*A function (rowscale): GB_DxB__min_fp64 // C+=B function (dense accum): GB_Cdense_accumB__min_fp64 // C+=b function (dense accum): GB_Cdense_accumb__min_fp64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_fp64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_fp64 // C=scalar+B GB_bind1st__min_fp64 // C=scalar+B' GB_bind1st_tran__min_fp64 // C=A+scalar GB_bind2nd__min_fp64 // C=A'+scalar GB_bind2nd_tran__min_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = fmin (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__min_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double bij = Bx [p] ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB_bind1st_tran__min_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_fp64 // A.*B function (eWiseMult): GB_AemultB__min_fp64 // A*D function (colscale): GB_AxD__min_fp64 // D*A function (rowscale): GB_DxB__min_fp64 // C+=B function (dense accum): GB_Cdense_accumB__min_fp64 // C+=b function (dense accum): GB_Cdense_accumb__min_fp64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_fp64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_fp64 // C=scalar+B GB_bind1st__min_fp64 // C=scalar+B' GB_bind1st_tran__min_fp64 // C=A+scalar GB_bind2nd__min_fp64 // C=A'+scalar GB_bind2nd_tran__min_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = fmin (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__min_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { double bij = Bx [p] ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB_bind1st_tran__min_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_fp64 // A.*B function (eWiseMult): GB_AemultB__min_fp64 // A*D function (colscale): GB_AxD__min_fp64 // D*A function (rowscale): GB_DxB__min_fp64 // C+=B function (dense accum): GB_Cdense_accumB__min_fp64 // C+=b function (dense accum): GB_Cdense_accumb__min_fp64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_fp64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_fp64 // C=scalar+B GB_bind1st__min_fp64 // C=scalar+B' GB_bind1st_tran__min_fp64 // C=A+scalar GB_bind2nd__min_fp64 // C=A'+scalar GB_bind2nd_tran__min_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = fmin (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__min_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double bij = Bx [p] ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB_bind1st_tran__min_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
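For reference, the bind1st/bind2nd kernels above all reduce to the same scalar-map pattern: one flat loop over the entries, parallelized with a static OpenMP schedule. A minimal self-contained sketch of that pattern (function and variable names here are illustrative, not part of GraphBLAS):

#include <math.h>
#include <stdint.h>

/* Sketch of the bind1st pattern: Cx [p] = fmin (x, Bx [p]) for every entry,
 * split statically across nthreads threads, as in GB_bind1st__min_fp64. */
static void bind1st_min_fp64_sketch(double *Cx, double x, const double *Bx,
                                    int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        Cx[p] = fmin(x, Bx[p]);
    }
}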
smith.c
/*! * \file smith.c * \author Jun Yoshida * \copyright (c) Jun Yoshida 2019 * The project is released under BSD3 License. * \details * Compute the Smith normal form by recursive application of hermiteNF_LLL. */ #include "common.h" #include "elementary.h" #include "hermite_lll.h" #include "smith.h" #include <stdlib.h> /* / Debug #include <stdio.h> #define DEBUG_MESSAGE fprintf(stderr, "%s:%d\n", __func__, __LINE__) // */ typedef struct mat_index_t_ { size_t i,j; } mat_index_t; /*! * Find the size of the maximal diagonal matrix D such that * > D | O * > mat = --+-- * > O | * */ static size_t max_diagonal(const matrix_type * mat) { size_t result = mat->r < mat->c ? mat->r : mat->c; #pragma omp parallel for reduction (min:result) for (size_t i = 0; i < mat->r; ++i) { for (size_t j = 0; j < mat->c; ++j) { if ( (i!=j) && (MATRIX_AT(*mat,i,j) != 0) ) { size_t cand = (i>j) ? j : i; if (cand < result) result = cand; break; } } } return result; } /*! * Find the last unit in the diagonal entries. */ static inline size_t find_last_unit_diag(const matrix_type *mat) { size_t bound = mat->r < mat->c ? mat->r : mat->c; size_t i; for (i = 0; i < bound; ++i) { if (MATRIX_AT(*mat, i, i) != 1) break; } return i; } /*! * Find the first zero in the diagonal entries. */ static inline size_t find_first_zero_diag(const matrix_type *mat) { size_t bound = mat->r < mat->c ? mat->r : mat->c; size_t i; for (i = 0; i < bound; ++i) { if (MATRIX_AT(*mat, i, i) == 0) break; } return i; } /*! * Eliminate all the off-diagonal entries by applying the LLL-based algorithm recursively. * \return the number of non-zero diagonals in the resulting matrix. */ static size_t elim_offdiag(matrix_type * restrict u, matrix_type * restrict uinv, matrix_type * restrict m, matrix_type * restrict v, matrix_type * restrict vinv) { matrix_type u_iter = *u, uinv_iter = *uinv; matrix_type m_iter = *m; /* Iterator referring to transposed v */ matrix_type vt_iter = { .p = v->p, .r = v->c, .c = v->r, .Xr = v->Xc, .Xc = v->Xr }; matrix_type vinvt_iter = { .p = vinv->p, .r = vinv->c, .c = vinv->r, .Xr = vinv->Xc, .Xc = vinv->Xr }; /* Counter for non-zero diagonals. */ size_t ndiag = 0; while(m_iter.r > 0 && m_iter.c > 0) { hermiteNF_LLL( 1, (matrix_type*[]){&u_iter}, 1, (matrix_type*[]){&uinv_iter}, &m_iter ); transpose(&m_iter); hermiteNF_LLL( 1, (matrix_type*[]){&vt_iter}, 1, (matrix_type*[]){&vinvt_iter}, &m_iter ); transpose(&m_iter); size_t k = max_diagonal(&m_iter); /* update iterators */ m_iter.r -= k; m_iter.c -= k; m_iter.p += k * (m_iter.Xr + m_iter.Xc); u_iter.c -= k; u_iter.p += k * u_iter.Xc; uinv_iter.r -= k; uinv_iter.p += k * uinv_iter.Xr; vt_iter.c -= k; vt_iter.p += k * vt_iter.Xc; vinvt_iter.r -= k; vinvt_iter.p += k * vinvt_iter.Xr; /* update the counter of non-zero diagonals. */ ndiag += k; } return ndiag; } /*! * Compute the Smith normal form of a given matrix. * \return the number of non-zero diagonals in the resulting matrix. */ size_t smithNF(matrix_type * restrict u, matrix_type * restrict uinv, matrix_type * restrict m, matrix_type * restrict v, matrix_type * restrict vinv) { matrix_type u_iter = u ? *u : MATRIX_ZEROROW(m->r); matrix_type uinv_iter = uinv ? *uinv : MATRIX_ZEROCOL(m->r); matrix_type m_iter = *m; matrix_type v_iter = v ? *v : MATRIX_ZEROCOL(m->c); matrix_type vinv_iter = vinv ?
*vinv : MATRIX_ZEROROW(m->c); size_t ndiag = elim_offdiag(&u_iter, &uinv_iter, &m_iter, &v_iter, &vinv_iter); // Trim the matrix into square if (m_iter.r > m_iter.c) { m_iter.r = m_iter.c; u_iter.c = m_iter.c; uinv_iter.r = m_iter.c; } else { m_iter.c = m_iter.r; v_iter.r = m_iter.r; vinv_iter.c = m_iter.r; } // Ignore all the diagonal entries == 1 size_t k = find_last_unit_diag(&m_iter); /* / Debug fprintf( stderr, "1!=@%zu\n", k); // */ m_iter.r -= k; m_iter.c -= k; m_iter.p += k * (m_iter.Xr + m_iter.Xc); u_iter.p += k * u_iter.Xc; uinv_iter.p += k * uinv_iter.Xr; v_iter.p += k * v_iter.Xr; vinv_iter.p += k * vinv_iter.Xc; // Ignore the null space k = find_first_zero_diag(&m_iter); /* / Debug fprintf( stderr, "0==@%zu\n", k); // */ m_iter.r = k; m_iter.c = k; u_iter.c = k; uinv_iter.r = k; v_iter.r = k; vinv_iter.c = k; /* / Debug fprintf( stderr, "%zu >< %zu, %"PRId64"\n", m_iter.r, m_iter.c, *(m_iter.p)); // */ while (m_iter.r > 0) { #pragma omp parallel for for (size_t i = 1; i < m_iter.r; ++i) { MATRIX_AT(m_iter, i, 0) = MATRIX_AT(m_iter, i, i); axpy_rows(-1, 0, i, &v_iter); axpy_columns(1, i, 0, &vinv_iter); } elim_offdiag(&u_iter, &uinv_iter, &m_iter, &v_iter, &vinv_iter); // update iterators --m_iter.r; --m_iter.c; m_iter.p += (m_iter.Xr + m_iter.Xc); --u_iter.c; u_iter.p += u_iter.Xc; --uinv_iter.r; uinv_iter.p += uinv_iter.Xr; --v_iter.r; v_iter.p += v_iter.Xr; --vinv_iter.c; vinv_iter.p += vinv_iter.Xc; } return ndiag; } /*! * Compute a representation of a linear map by a Smith normal form. * More precisely, for a linear map f:Z^r->Z^s, this function computes a commutative diagram * f * Z^r → Z^s * V ↑ ↑ U * Z^r → Z^s * S * where * - U and V are unimodular; * - S is in a Smith normal form. * \param a transformed into the matrix product a <> U. * \param m A representation matrix for f; transformed into S. * \param b transformed into the matrix product b <> V. * \pre Be sure that both a <> U and b <> V make sense. */ size_t smithRep(matrix_type * restrict a, matrix_type * restrict m, matrix_type * restrict b) { /* auxiliary matrix: column major */ matrix_type aux = { .p = calloc(a->r * a->r, sizeof(target_type)), .r = a->r, .c = a->r, .Xr = 1, .Xc = a->r, }; /* Initialize aux into the identity matrix. */ for(size_t i = 0; i < a->r; ++i) MATRIX_AT(aux,i,i) = 1; /* * Compute the Smith normal form of m and save the number of non-zero diagonals. * For cleaner image vectors, it is good to compute the Smith normal form of the transpose of m instead of m itself. */ transpose(m); transpose(b); size_t ndiag = smithNF(NULL, b, m, &aux, NULL); transpose(m); transpose(b); transpose(&aux); /* Multiply A by U which is as simple as possible. */ aux.c = ndiag; hermiteNF_LLL(1, (matrix_type*[]){a}, 0, (matrix_type*[]){}, &aux); /* We will not use aux any more. */ free(aux.p); /* Make the kernel vectors cleaner. */ if (ndiag < m->r) { matrix_type bker = { .p = b->p + ndiag * b->Xc, .r = b->c - ndiag, .c = b->r, .Xr = b->Xc, .Xc = b->Xr }; hermiteNF_LLL( 0, (matrix_type*[]){}, 0, (matrix_type*[]){}, &bker ); } return ndiag; }
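max_diagonal above relies on OpenMP's reduction(min:...) clause (OpenMP 3.1 or later): each thread minimizes into a private copy of result, initialized to the reduction identity, and the private copies are min-combined after the loop. A self-contained sketch of the same idiom (all names illustrative):

#include <stddef.h>

/* Find the smallest index of a non-zero entry, or n if there is none,
 * using the same OpenMP min-reduction idiom as max_diagonal. */
static size_t first_nonzero_index_sketch(const long *a, size_t n)
{
    size_t result = n; /* "not found" sentinel, combined into the reduction */
    #pragma omp parallel for reduction(min : result)
    for (size_t i = 0; i < n; ++i)
    {
        if (a[i] != 0 && i < result)
            result = i;
    }
    return result;
}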
#include <stdio.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "type.h" void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, logical verified, char *npbversion, char *compiletime, char *cs1, char *cs2, char *cs3, char *cs4, char *cs5, char *cs6, char *cs7) { char size[16]; int j; int num_threads, max_threads; max_threads = 1; num_threads = 1; // figure out number of threads used #ifdef _OPENMP max_threads = omp_get_max_threads(); #pragma omp parallel shared(num_threads) { #pragma omp master num_threads = omp_get_num_threads(); } #endif printf("\n\n %s Benchmark Completed.\n", name); printf(" Class = %12c\n", class); // If this is not a grid-based problem (EP, FT, CG), then // we only print n1, which contains some measure of the // problem size. In that case, n2 and n3 are both zero. // Otherwise, we print the grid size n1xn2xn3 if ((n2 == 0) && (n3 == 0)) { if ((name[0] == 'E') && (name[1] == 'P')) { sprintf(size, "%15.0lf", pow(2.0, n1)); j = 14; if (size[j] == '.') { size[j] = ' '; j--; } size[j + 1] = '\0'; printf(" Size = %15s\n", size); } else { printf(" Size = %12d\n", n1); } } else { printf(" Size = %4dx%4dx%4d\n", n1, n2, n3); } printf(" Iterations = %12d\n", niter); printf(" Time in seconds = %12.2lf\n", t); printf(" Total threads = %12d\n", num_threads); printf(" Avail threads = %12d\n", max_threads); if (num_threads != max_threads) printf(" Warning: Threads used differ from threads available\n"); printf(" Mop/s total = %15.2lf\n", mops); printf(" Mop/s/thread = %15.2lf\n", mops / (double)num_threads); printf(" Operation type = %24s\n", optype); if (verified) printf(" Verification = %12s\n", "SUCCESSFUL"); else printf(" Verification = %12s\n", "UNSUCCESSFUL"); printf(" Version = %12s\n", npbversion); printf(" Compile date = %12s\n", compiletime); printf("\n Compile options:\n" " CC = %s\n", cs1); printf(" CLINK = %s\n", cs2); printf(" C_LIB = %s\n", cs3); printf(" C_INC = %s\n", cs4); printf(" CFLAGS = %s\n", cs5); printf(" CLINKFLAGS = %s\n", cs6); printf(" RAND = %s\n", cs7); printf("\n--------------------------------------\n" " Please send all errors/feedbacks to:\n" " Center for Manycore Programming\n" " cmp@aces.snu.ac.kr\n" " http://aces.snu.ac.kr\n" "--------------------------------------\n\n"); }
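The thread-count logic above is the standard guarded idiom for code that must also build without OpenMP: omp_get_max_threads() reports the threads available, while omp_get_num_threads() only returns the team size when called inside a parallel region, so it is read under #pragma omp master. A minimal sketch of the same idiom:

#ifdef _OPENMP
#include <omp.h>
#endif

/* Returns the number of threads a parallel region actually uses;
 * falls back to 1 when compiled without OpenMP. */
static int query_num_threads(void)
{
    int num_threads = 1;
#ifdef _OPENMP
    #pragma omp parallel shared(num_threads)
    {
        #pragma omp master
        num_threads = omp_get_num_threads();
    }
#endif
    return num_threads;
}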
/****************************************************************************** * FILE: omp_bug2.c * DESCRIPTION: * Another OpenMP program with a bug. * AUTHOR: Blaise Barney * LAST REVISED: 04/06/05 ******************************************************************************/ // online source: https://computing.llnl.gov/tutorials/openMP/samples/C/omp_bug2.c #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int nthreads, i, tid; float total; /*** Spawn parallel region ***/ /* Obtain thread number */ tid = omp_get_thread_num(); /* Only master thread does this */ if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } printf("Thread %d is starting...\n", tid); /* do some work */ total = 0.0; for (i = 0; i < 1000000; i++) total = total + i * 1.0; printf("Thread %d is done! Total= %e\n", tid, total); /*** End of parallel region ***/ }
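For reference, the bug in this LLNL example is that tid and total are shared by all threads once the parallel region (stripped in this copy) is restored, so threads race on both variables. A corrected sketch of the intended program, with tid made private and the accumulation done as an OpenMP reduction:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    int nthreads, i, tid;
    float total = 0.0f;

    #pragma omp parallel private(tid)   /* tid must be private to each thread */
    {
        tid = omp_get_thread_num();
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d is starting...\n", tid);

        /* each thread accumulates a private partial sum; the partial sums
         * are combined at the end of the worksharing loop */
        #pragma omp for reduction(+ : total)
        for (i = 0; i < 1000000; i++)
            total = total + i * 1.0f;

        printf("Thread %d is done! Total= %e\n", tid, total);
    }
    return 0;
}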
HYPRE_IJMatrix.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * HYPRE_IJMatrix interface * *****************************************************************************/ #include "./_hypre_IJ_mv.h" #include "../HYPRE.h" /*-------------------------------------------------------------------------- * HYPRE_IJMatrixCreate *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixCreate( MPI_Comm comm, HYPRE_BigInt ilower, HYPRE_BigInt iupper, HYPRE_BigInt jlower, HYPRE_BigInt jupper, HYPRE_IJMatrix *matrix ) { HYPRE_BigInt info[2]; HYPRE_Int num_procs; HYPRE_Int myid; hypre_IJMatrix *ijmatrix; HYPRE_BigInt row0, col0, rowN, colN; ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ijmatrix) = comm; hypre_IJMatrixObject(ijmatrix) = NULL; hypre_IJMatrixTranslator(ijmatrix) = NULL; hypre_IJMatrixAssumedPart(ijmatrix) = NULL; hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED; hypre_IJMatrixAssembleFlag(ijmatrix) = 0; hypre_IJMatrixPrintLevel(ijmatrix) = 0; hypre_IJMatrixOMPFlag(ijmatrix) = 0; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); if (ilower > iupper + 1 || ilower < 0) { hypre_error_in_arg(2); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (iupper < -1) { hypre_error_in_arg(3); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (jlower > jupper + 1 || jlower < 0) { hypre_error_in_arg(4); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (jupper < -1) { hypre_error_in_arg(5); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower; hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper + 1; hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower; hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper + 1; /* now we need the global number of rows and columns as well as the global first row and column index */ /* proc 0 has the first row and col */ if (myid == 0) { info[0] = ilower; info[1] = jlower; } hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm); row0 = info[0]; col0 = info[1]; /* proc (num_procs-1) has the last row and col */ if (myid == (num_procs - 1)) { info[0] = iupper; info[1] = jupper; } hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs - 1, comm); rowN = info[0]; colN = info[1]; hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0; hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0; hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1; hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1; *matrix = (HYPRE_IJMatrix) ijmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (ijmatrix) { if (hypre_IJMatrixAssumedPart(ijmatrix)) { hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix)); } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
{ hypre_IJMatrixDestroyParCSR( ijmatrix ); } else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 ) { hypre_error_in_arg(1); return hypre_error_flag; } } hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { hypre_IJMatrixInitializeParCSR( ijmatrix ) ; } else { hypre_error_in_arg(1); } return hypre_error_flag; } HYPRE_Int HYPRE_IJMatrixInitialize_v2( HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { hypre_IJMatrixInitializeParCSR_v2( ijmatrix, memory_location ) ; } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix, HYPRE_Int print_level ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixPrintLevel(ijmatrix) = 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- * This is a helper routine to compute a prefix sum of integer values. * * The current implementation is okay for modest numbers of threads. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_PrefixSumInt(HYPRE_Int nvals, HYPRE_Int *vals, HYPRE_Int *sums) { HYPRE_Int j, nthreads, bsize; nthreads = hypre_NumThreads(); bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */ if (nvals < nthreads || bsize == 1) { sums[0] = 0; for (j = 1; j < nvals; j++) { sums[j] = sums[j - 1] + vals[j - 1]; } } else { /* Compute preliminary partial sums (in parallel) within each interval */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j + bsize), nvals); sums[j] = 0; for (i = j + 1; i < n; i++) { sums[i] = sums[i - 1] + vals[i - 1]; } } /* Compute final partial sums (in serial) for the first entry of every interval */ for (j = bsize; j < nvals; j += bsize) { sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1]; } /* Compute final partial sums (in parallel) for the remaining entries */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = bsize; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j + bsize), nvals); for (i = j + 1; i < n; i++) { sums[i] += sums[j]; } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "set"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int
*) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { return ( hypre_IJMatrixSetConstantValuesParCSR( ijmatrix, value)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR ) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, 
rows, row_indexes, cols, values, "add"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ); if (exec == HYPRE_EXEC_DEVICE) { return ( hypre_IJMatrixAssembleParCSRDevice( ijmatrix ) ); } else #endif { return ( hypre_IJMatrixAssembleParCSR( ijmatrix ) ); } } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_BigInt *rows, HYPRE_Int *ncols ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } if (!rows) { hypre_error_in_arg(3); return hypre_error_flag; } if (!ncols) { hypre_error_in_arg(4); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { hypre_IJMatrixGetRowCountsParCSR( ijmatrix, nrows, rows, ncols ); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int *ncols, HYPRE_BigInt *rows, HYPRE_BigInt *cols, HYPRE_Complex *values ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { hypre_IJMatrixGetValuesParCSR( ijmatrix, nrows, ncols, rows, cols, values ); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int 
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix, HYPRE_Int type ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixObjectType(ijmatrix) = type; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix, HYPRE_Int *type ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } *type = hypre_IJMatrixObjectType(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix, HYPRE_BigInt *ilower, HYPRE_BigInt *iupper, HYPRE_BigInt *jlower, HYPRE_BigInt *jupper ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } row_partitioning = hypre_IJMatrixRowPartitioning(ijmatrix); col_partitioning = hypre_IJMatrixColPartitioning(ijmatrix); *ilower = row_partitioning[0]; *iupper = row_partitioning[1] - 1; *jlower = col_partitioning[0]; *jupper = col_partitioning[1] - 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /** Returns a pointer to an underlying ijmatrix type used to implement IJMatrix. Assumes that the implementation has an underlying matrix, so it would not work with a direct implementation of IJMatrix. @return integer error code @param IJMatrix [IN] The ijmatrix to be pointed to. 
*/ HYPRE_Int HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix, void **object ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } *object = hypre_IJMatrixObject( ijmatrix ); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix, const HYPRE_Int *sizes ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { return ( hypre_IJMatrixSetRowSizesParCSR( ijmatrix, sizes ) ); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix matrix, const HYPRE_Int *diag_sizes, const HYPRE_Int *offdiag_sizes ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { hypre_IJMatrixSetDiagOffdSizesParCSR( ijmatrix, diag_sizes, offdiag_sizes ); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix, HYPRE_Int max_off_proc_elmts) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR ) { return ( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix, max_off_proc_elmts) ); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixRead * create IJMatrix on host memory *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixRead( const char *filename, MPI_Comm comm, HYPRE_Int type, HYPRE_IJMatrix *matrix_ptr ) { HYPRE_IJMatrix matrix; HYPRE_BigInt ilower, iupper, jlower, jupper; HYPRE_BigInt I, J; HYPRE_Int ncols; HYPRE_Complex value; HYPRE_Int myid, ret; char new_filename[255]; FILE *file; hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper); HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix); HYPRE_IJMatrixSetObjectType(matrix, type); HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST); /* It is important to ensure that whitespace follows the index value to help * catch mistakes in the input file. See comments in IJVectorRead(). 
*/ ncols = 1; while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF ) { if (ret != 3) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file."); return hypre_error_flag; } if (I < ilower || I > iupper) { HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value); } else { HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value); } } HYPRE_IJMatrixAssemble(matrix); fclose(file); *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixPrint( HYPRE_IJMatrix matrix, const char *filename ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) ) { hypre_error_in_arg(1); return hypre_error_flag; } void *object; HYPRE_IJMatrixGetObject(matrix, &object); HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object; HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix); if ( hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST ) { hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename); } else { HYPRE_ParCSRMatrix par_csr2 = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixPrintIJ(par_csr2, 0, 0, filename); hypre_ParCSRMatrixDestroy(par_csr2); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixSetOMPFlag *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix, HYPRE_Int omp_flag ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixTranspose( HYPRE_IJMatrix matrix_A, HYPRE_IJMatrix *matrix_AT ) { hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A; hypre_IJMatrix *ij_AT; HYPRE_Int i; if (!ij_A) { hypre_error_in_arg(1); return hypre_error_flag; } ij_AT = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ij_AT) = hypre_IJMatrixComm(ij_A); hypre_IJMatrixObject(ij_AT) = NULL; hypre_IJMatrixTranslator(ij_AT) = NULL; hypre_IJMatrixAssumedPart(ij_AT) = NULL; hypre_IJMatrixObjectType(ij_AT) = hypre_IJMatrixObjectType(ij_A); hypre_IJMatrixAssembleFlag(ij_AT) = 1; hypre_IJMatrixPrintLevel(ij_AT) = hypre_IJMatrixPrintLevel(ij_A); hypre_IJMatrixGlobalFirstRow(ij_AT) = hypre_IJMatrixGlobalFirstCol(ij_A); hypre_IJMatrixGlobalFirstCol(ij_AT) = hypre_IJMatrixGlobalFirstRow(ij_A); hypre_IJMatrixGlobalNumRows(ij_AT) = hypre_IJMatrixGlobalNumCols(ij_A); hypre_IJMatrixGlobalNumCols(ij_AT) = hypre_IJMatrixGlobalNumRows(ij_A); for (i = 0; i < 2; i++) { hypre_IJMatrixRowPartitioning(ij_AT)[i] = hypre_IJMatrixColPartitioning(ij_A)[i]; hypre_IJMatrixColPartitioning(ij_AT)[i] = hypre_IJMatrixRowPartitioning(ij_A)[i]; } if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR) { hypre_IJMatrixTransposeParCSR(ij_A, ij_AT); } else { hypre_error_in_arg(1); } *matrix_AT = (HYPRE_IJMatrix) ij_AT; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixNorm * * TODO: Add other norms 
*--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixNorm( HYPRE_IJMatrix matrix, HYPRE_Real *norm ) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixNormParCSR(ijmatrix, norm); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixAdd *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAdd( HYPRE_Complex alpha, HYPRE_IJMatrix matrix_A, HYPRE_Complex beta, HYPRE_IJMatrix matrix_B, HYPRE_IJMatrix *matrix_C ) { hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A; hypre_IJMatrix *ij_B = (hypre_IJMatrix *) matrix_B; hypre_IJMatrix *ij_C; HYPRE_BigInt *row_partitioning_A; HYPRE_BigInt *col_partitioning_A; HYPRE_BigInt *row_partitioning_B; HYPRE_BigInt *col_partitioning_B; HYPRE_Int i; if (!ij_A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!ij_B) { hypre_error_in_arg(4); return hypre_error_flag; } /* Check if A and B have the same row/col partitionings */ row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A); row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B); col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A); col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B); for (i = 0; i < 2; i++) { if (row_partitioning_A[i] != row_partitioning_B[i]) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Input matrices must have same row partitioning!"); return hypre_error_flag; } if (col_partitioning_A[i] != col_partitioning_B[i]) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Input matrices must have same col partitioning!"); return hypre_error_flag; } } ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ij_C) = hypre_IJMatrixComm(ij_A); hypre_IJMatrixObject(ij_C) = NULL; hypre_IJMatrixTranslator(ij_C) = NULL; hypre_IJMatrixAssumedPart(ij_C) = NULL; hypre_IJMatrixObjectType(ij_C) = hypre_IJMatrixObjectType(ij_A); hypre_IJMatrixAssembleFlag(ij_C) = 1; hypre_IJMatrixPrintLevel(ij_C) = hypre_IJMatrixPrintLevel(ij_A); /* Copy row/col partitioning of A to C */ for (i = 0; i < 2; i++) { hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i]; hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i]; } if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR) { hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C); } else { hypre_error_in_arg(1); } *matrix_C = (HYPRE_IJMatrix) ij_C; return hypre_error_flag; }
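The blocked scan in hypre_PrefixSumInt above is easiest to verify on a small standalone case. The sketch below is illustrative only: plain int stands in for HYPRE_Int, a local MIN macro for hypre_min, and the two block loops run serially where hypre would place them under #pragma omp parallel for. It reproduces the three passes: independent exclusive scans per block, a serial fix-up of each block's first entry, and a parallelizable pass that adds the block offset to the remaining entries.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Exclusive prefix sum over nvals entries, computed blockwise.  Pass 1
 * scans each block independently, the serial sweep then fixes the first
 * entry of every block, and pass 2 adds that offset to the rest of the
 * block.  The first and third loops are the ones hypre parallelizes. */
static void prefix_sum_blocked(int nvals, const int *vals, int *sums, int bsize)
{
   int i, j;

   for (j = 0; j < nvals; j += bsize)      /* pass 1: local scans */
   {
      int n = MIN(j + bsize, nvals);
      sums[j] = 0;
      for (i = j + 1; i < n; i++)
      {
         sums[i] = sums[i - 1] + vals[i - 1];
      }
   }
   for (j = bsize; j < nvals; j += bsize)  /* serial fix-up of block heads */
   {
      sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1];
   }
   for (j = bsize; j < nvals; j += bsize)  /* pass 2: propagate offsets */
   {
      int n = MIN(j + bsize, nvals);
      for (i = j + 1; i < n; i++)
      {
         sums[i] += sums[j];
      }
   }
}

int main(void)
{
   int vals[8] = {3, 1, 4, 1, 5, 9, 2, 6};
   int sums[8];
   int i;

   prefix_sum_blocked(8, vals, sums, 3);
   for (i = 0; i < 8; i++)
   {
      printf("%d ", sums[i]);   /* expected: 0 3 4 8 9 14 23 25 */
   }
   printf("\n");
   return 0;
}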
/****************************************************************************** * * HYPRE_IJMatrix interface * *****************************************************************************/ #include "./_hypre_IJ_mv.h" #include "../HYPRE.h" /*-------------------------------------------------------------------------- * HYPRE_IJMatrixCreate *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixCreate(MPI_Comm comm, HYPRE_BigInt ilower, HYPRE_BigInt iupper, HYPRE_BigInt jlower, HYPRE_BigInt jupper, HYPRE_IJMatrix * matrix) { HYPRE_BigInt info[2]; HYPRE_Int num_procs; HYPRE_Int myid; hypre_IJMatrix *ijmatrix; HYPRE_BigInt row0, col0, rowN, colN; ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ijmatrix) = comm; hypre_IJMatrixObject(ijmatrix) = NULL; hypre_IJMatrixTranslator(ijmatrix) = NULL; hypre_IJMatrixAssumedPart(ijmatrix) = NULL; hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED; hypre_IJMatrixAssembleFlag(ijmatrix) = 0; hypre_IJMatrixPrintLevel(ijmatrix) = 0; hypre_IJMatrixOMPFlag(ijmatrix) = 0; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); if (ilower > iupper + 1 || ilower < 0) { hypre_error_in_arg(2); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (iupper < -1) { hypre_error_in_arg(3); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (jlower > jupper + 1 || jlower < 0) { hypre_error_in_arg(4); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (jupper < -1) { hypre_error_in_arg(5); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower; hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper + 1; hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower; hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper + 1; /* * now we need the global number of rows and columns as well as the * global first row and column index */ /* proc 0 has the first row and col */ if (myid == 0) { info[0] = ilower; info[1] = jlower; } hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm); row0 = info[0]; col0 = info[1]; /* proc (num_procs-1) has the last row and col */ if (myid == (num_procs - 1)) { info[0] = iupper; info[1] = jupper; } hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs - 1, comm); rowN = info[0]; colN = info[1]; hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0; hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0; hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1; hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1; *matrix = (HYPRE_IJMatrix) ijmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixDestroy(HYPRE_IJMatrix matrix) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (ijmatrix) { if (hypre_IJMatrixAssumedPart(ijmatrix)) { hypre_AssumedPartitionDestroy((hypre_IJAssumedPart *) hypre_IJMatrixAssumedPart(ijmatrix)); } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixDestroyParCSR(ijmatrix); } else if (hypre_IJMatrixObjectType(ijmatrix) != -1) { hypre_error_in_arg(1); return hypre_error_flag; } } hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixInitialize(HYPRE_IJMatrix matrix) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixInitializeParCSR(ijmatrix); } else { hypre_error_in_arg(1); } return hypre_error_flag; } HYPRE_Int HYPRE_IJMatrixInitialize_v2(HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixInitializeParCSR_v2(ijmatrix, memory_location); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetPrintLevel(HYPRE_IJMatrix matrix, HYPRE_Int print_level) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixPrintLevel(ijmatrix) = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * This is a helper routine to compute a prefix sum of integer values. * * The current implementation is okay for modest numbers of threads. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PrefixSumInt(HYPRE_Int nvals, HYPRE_Int * vals, HYPRE_Int * sums) { HYPRE_Int j, nthreads, bsize; nthreads = hypre_NumThreads(); bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the * remainder */ if (nvals < nthreads || bsize == 1) { sums[0] = 0; for (j = 1; j < nvals; j++) { sums[j] = sums[j - 1] + vals[j - 1]; } } else { /* * Compute preliminary partial sums (in parallel) within each * interval */ for (j = 0; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j + bsize), nvals); sums[j] = 0; for (i = j + 1; i < n; i++) { sums[i] = sums[i - 1] + vals[i - 1]; } } /* * Compute final partial sums (in serial) for the first entry of * every interval */ for (j = bsize; j < nvals; j += bsize) { sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1]; } /* Compute final partial sums (in parallel) for the remaining entries */ for (j = bsize; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j + bsize), nvals); for (i = j + 1; i < n; i++) { sums[i] += sums[j]; } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; }
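For reference, a typical host-side call sequence into the entry points defined in this file looks as follows. This is an illustrative sketch, not hypre documentation: it assembles a hardwired 2x2 matrix owned entirely by one rank, assumes MPI has been initialized and hypre's public headers are on the include path, and omits error checking. Passing per-row ncols with NULL row_indexes exercises the hypre_PrefixSumInt path inside HYPRE_IJMatrixSetValues2.

#include <mpi.h>
#include "HYPRE.h"
#include "HYPRE_IJ_mv.h"

/* Illustrative: assemble [ 2 -1 ; -1 2 ] on a single rank. */
void example_ij_assembly(MPI_Comm comm)
{
   HYPRE_IJMatrix     A;
   HYPRE_ParCSRMatrix parcsr_A;
   void              *object;

   HYPRE_BigInt  rows[2]  = {0, 1};
   HYPRE_Int     ncols[2] = {2, 2};                   /* entries per row */
   HYPRE_BigInt  cols[4]  = {0, 1, 0, 1};             /* flattened column indices */
   HYPRE_Complex vals[4]  = {2.0, -1.0, -1.0, 2.0};   /* flattened values */

   HYPRE_IJMatrixCreate(comm, 0, 1, 0, 1, &A);        /* local rows 0..1, cols 0..1 */
   HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR);
   HYPRE_IJMatrixInitialize(A);

   /* NULL row_indexes: SetValues2 derives them from ncols via hypre_PrefixSumInt */
   HYPRE_IJMatrixSetValues(A, 2, ncols, rows, cols, vals);

   HYPRE_IJMatrixAssemble(A);
   HYPRE_IJMatrixGetObject(A, &object);
   parcsr_A = (HYPRE_ParCSRMatrix) object;            /* hand off to a ParCSR solver */
   (void) parcsr_A;

   HYPRE_IJMatrixDestroy(A);
}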
/*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues2(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_Int * row_indexes, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "set"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetConstantValues(HYPRE_IJMatrix matrix, HYPRE_Complex value) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { return (hypre_IJMatrixSetConstantValuesParCSR(ijmatrix, value)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } 
HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues2(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_Int * row_indexes, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "add"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAssemble(HYPRE_IJMatrix matrix) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)); if (exec == HYPRE_EXEC_DEVICE) { return (hypre_IJMatrixAssembleParCSRDevice(ijmatrix)); } else #endif { return (hypre_IJMatrixAssembleParCSR(ijmatrix)); } } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetRowCounts(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_BigInt * rows, HYPRE_Int * ncols) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } if (!rows) { hypre_error_in_arg(3); return hypre_error_flag; } if (!ncols) { hypre_error_in_arg(4); return hypre_error_flag; } if 
(hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixGetRowCountsParCSR(ijmatrix, nrows, rows, ncols); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetValues(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, HYPRE_BigInt * rows, HYPRE_BigInt * cols, HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixGetValuesParCSR(ijmatrix, nrows, ncols, rows, cols, values); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetObjectType(HYPRE_IJMatrix matrix, HYPRE_Int type) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixObjectType(ijmatrix) = type; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetObjectType(HYPRE_IJMatrix matrix, HYPRE_Int * type) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } *type = hypre_IJMatrixObjectType(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetLocalRange(HYPRE_IJMatrix matrix, HYPRE_BigInt * ilower, HYPRE_BigInt * iupper, HYPRE_BigInt * jlower, HYPRE_BigInt * jupper) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } row_partitioning = hypre_IJMatrixRowPartitioning(ijmatrix); col_partitioning = hypre_IJMatrixColPartitioning(ijmatrix); *ilower = row_partitioning[0]; *iupper = row_partitioning[1] - 1; *jlower = col_partitioning[0]; *jupper = col_partitioning[1] - 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /** Returns a pointer to an underlying ijmatrix type used to implement IJMatrix. Assumes that the implementation has an underlying matrix, so it would not work with a direct implementation of IJMatrix. @return integer error code @param IJMatrix [IN] The ijmatrix to be pointed to. 
*/ HYPRE_Int HYPRE_IJMatrixGetObject(HYPRE_IJMatrix matrix, void **object) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } *object = hypre_IJMatrixObject(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetRowSizes(HYPRE_IJMatrix matrix, const HYPRE_Int * sizes) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { return (hypre_IJMatrixSetRowSizesParCSR(ijmatrix, sizes)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetDiagOffdSizes(HYPRE_IJMatrix matrix, const HYPRE_Int * diag_sizes, const HYPRE_Int * offdiag_sizes) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixSetDiagOffdSizesParCSR(ijmatrix, diag_sizes, offdiag_sizes); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetMaxOffProcElmts(HYPRE_IJMatrix matrix, HYPRE_Int max_off_proc_elmts) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { return (hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix, max_off_proc_elmts)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixRead * create IJMatrix on host memory *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixRead(const char *filename, MPI_Comm comm, HYPRE_Int type, HYPRE_IJMatrix * matrix_ptr) { HYPRE_IJMatrix matrix; HYPRE_BigInt ilower, iupper, jlower, jupper; HYPRE_BigInt I, J; HYPRE_Int ncols; HYPRE_Complex value; HYPRE_Int myid, ret; char new_filename[255]; FILE *file; hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper); HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix); HYPRE_IJMatrixSetObjectType(matrix, type); HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST); /* * It is important to ensure that whitespace follows the index value to * help catch mistakes in the input file. See comments in * IJVectorRead(). 
*/ ncols = 1; while ((ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF) { if (ret != 3) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file."); return hypre_error_flag; } if (I < ilower || I > iupper) { HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value); } else { HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value); } } HYPRE_IJMatrixAssemble(matrix); fclose(file); *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixPrint(HYPRE_IJMatrix matrix, const char *filename) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ((hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR)) { hypre_error_in_arg(1); return hypre_error_flag; } void *object; HYPRE_IJMatrixGetObject(matrix, &object); HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object; HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix); if (hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST) { hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename); } else { HYPRE_ParCSRMatrix par_csr2 = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixPrintIJ(par_csr2, 0, 0, filename); hypre_ParCSRMatrixDestroy(par_csr2); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixSetOMPFlag *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetOMPFlag(HYPRE_IJMatrix matrix, HYPRE_Int omp_flag) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixTranspose(HYPRE_IJMatrix matrix_A, HYPRE_IJMatrix * matrix_AT) { hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A; hypre_IJMatrix *ij_AT; HYPRE_Int i; if (!ij_A) { hypre_error_in_arg(1); return hypre_error_flag; } ij_AT = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ij_AT) = hypre_IJMatrixComm(ij_A); hypre_IJMatrixObject(ij_AT) = NULL; hypre_IJMatrixTranslator(ij_AT) = NULL; hypre_IJMatrixAssumedPart(ij_AT) = NULL; hypre_IJMatrixObjectType(ij_AT) = hypre_IJMatrixObjectType(ij_A); hypre_IJMatrixAssembleFlag(ij_AT) = 1; hypre_IJMatrixPrintLevel(ij_AT) = hypre_IJMatrixPrintLevel(ij_A); hypre_IJMatrixGlobalFirstRow(ij_AT) = hypre_IJMatrixGlobalFirstCol(ij_A); hypre_IJMatrixGlobalFirstCol(ij_AT) = hypre_IJMatrixGlobalFirstRow(ij_A); hypre_IJMatrixGlobalNumRows(ij_AT) = hypre_IJMatrixGlobalNumCols(ij_A); hypre_IJMatrixGlobalNumCols(ij_AT) = hypre_IJMatrixGlobalNumRows(ij_A); for (i = 0; i < 2; i++) { hypre_IJMatrixRowPartitioning(ij_AT)[i] = hypre_IJMatrixColPartitioning(ij_A)[i]; hypre_IJMatrixColPartitioning(ij_AT)[i] = hypre_IJMatrixRowPartitioning(ij_A)[i]; } if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR) { hypre_IJMatrixTransposeParCSR(ij_A, ij_AT); } else { hypre_error_in_arg(1); } *matrix_AT = (HYPRE_IJMatrix) ij_AT; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixNorm * * TODO: Add other norms 
*--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixNorm(HYPRE_IJMatrix matrix, HYPRE_Real * norm) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixNormParCSR(ijmatrix, norm); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixAdd *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAdd(HYPRE_Complex alpha, HYPRE_IJMatrix matrix_A, HYPRE_Complex beta, HYPRE_IJMatrix matrix_B, HYPRE_IJMatrix * matrix_C) { hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A; hypre_IJMatrix *ij_B = (hypre_IJMatrix *) matrix_B; hypre_IJMatrix *ij_C; HYPRE_BigInt *row_partitioning_A; HYPRE_BigInt *col_partitioning_A; HYPRE_BigInt *row_partitioning_B; HYPRE_BigInt *col_partitioning_B; HYPRE_Int i; if (!ij_A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!ij_B) { hypre_error_in_arg(4); return hypre_error_flag; } /* Check if A and B have the same row/col partitionings */ row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A); row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B); col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A); col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B); for (i = 0; i < 2; i++) { if (row_partitioning_A[i] != row_partitioning_B[i]) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Input matrices must have same row partitioning!"); return hypre_error_flag; } if (col_partitioning_A[i] != col_partitioning_B[i]) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Input matrices must have same col partitioning!"); return hypre_error_flag; } } ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ij_C) = hypre_IJMatrixComm(ij_A); hypre_IJMatrixObject(ij_C) = NULL; hypre_IJMatrixTranslator(ij_C) = NULL; hypre_IJMatrixAssumedPart(ij_C) = NULL; hypre_IJMatrixObjectType(ij_C) = hypre_IJMatrixObjectType(ij_A); hypre_IJMatrixAssembleFlag(ij_C) = 1; hypre_IJMatrixPrintLevel(ij_C) = hypre_IJMatrixPrintLevel(ij_A); /* Copy row/col partitioning of A to C */ for (i = 0; i < 2; i++) { hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i]; hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i]; } if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR) { hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C); } else { hypre_error_in_arg(1); } *matrix_C = (HYPRE_IJMatrix) ij_C; return hypre_error_flag; }
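The scanf formats in HYPRE_IJMatrixRead above imply a simple per-rank file layout: rank r opens "<filename>.%05d" formatted with its rank, reads one header line holding the local ilower iupper jlower jupper range, and then one "row col value" triple per line until EOF; rows outside the local range are routed through AddToValues instead of SetValues. The hypothetical writer below (illustrative only, and not claimed to match the exact output of HYPRE_IJMatrixPrint) produces a file that this reader can parse for the 2x2 example matrix.

#include <stdio.h>

/* Write rank 0's piece of a 2x2 matrix in the layout HYPRE_IJMatrixRead
 * expects: header "ilower iupper jlower jupper", then one
 * "row col value" triple per line, whitespace-separated. */
int main(void)
{
   FILE *f = fopen("A.00000", "w");   /* "<filename>.%05d" with myid = 0 */

   if (!f) { return 1; }
   fprintf(f, "0 1 0 1\n");           /* local row/col range */
   fprintf(f, "0 0 2.0\n");
   fprintf(f, "0 1 -1.0\n");
   fprintf(f, "1 0 -1.0\n");
   fprintf(f, "1 1 2.0\n");
   fclose(f);
   return 0;
}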
/****************************************************************************** * * HYPRE_IJMatrix interface * *****************************************************************************/ #include "./_hypre_IJ_mv.h" #include "../HYPRE.h" /*-------------------------------------------------------------------------- * HYPRE_IJMatrixCreate *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixCreate(MPI_Comm comm, HYPRE_BigInt ilower, HYPRE_BigInt iupper, HYPRE_BigInt jlower, HYPRE_BigInt jupper, HYPRE_IJMatrix * matrix) { HYPRE_BigInt info[2]; HYPRE_Int num_procs; HYPRE_Int myid; hypre_IJMatrix *ijmatrix; HYPRE_BigInt row0, col0, rowN, colN; ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ijmatrix) = comm; hypre_IJMatrixObject(ijmatrix) = NULL; hypre_IJMatrixTranslator(ijmatrix) = NULL; hypre_IJMatrixAssumedPart(ijmatrix) = NULL; hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED; hypre_IJMatrixAssembleFlag(ijmatrix) = 0; hypre_IJMatrixPrintLevel(ijmatrix) = 0; hypre_IJMatrixOMPFlag(ijmatrix) = 0; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); if (ilower > iupper + 1 || ilower < 0) { hypre_error_in_arg(2); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (iupper < -1) { hypre_error_in_arg(3); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (jlower > jupper + 1 || jlower < 0) { hypre_error_in_arg(4); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } if (jupper < -1) { hypre_error_in_arg(5); hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower; hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper + 1; hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower; hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper + 1; /* * now we need the global number of rows and columns as well as the * global first row and column index */ /* proc 0 has the first row and col */ if (myid == 0) { info[0] = ilower; info[1] = jlower; } hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm); row0 = info[0]; col0 = info[1]; /* proc (num_procs-1) has the last row and col */ if (myid == (num_procs - 1)) { info[0] = iupper; info[1] = jupper; } hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs - 1, comm); rowN = info[0]; colN = info[1]; hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0; hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0; hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1; hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1; *matrix = (HYPRE_IJMatrix) ijmatrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixDestroy(HYPRE_IJMatrix matrix) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (ijmatrix) { if (hypre_IJMatrixAssumedPart(ijmatrix)) { hypre_AssumedPartitionDestroy((hypre_IJAssumedPart *) hypre_IJMatrixAssumedPart(ijmatrix)); } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixDestroyParCSR(ijmatrix); } else if (hypre_IJMatrixObjectType(ijmatrix) != -1) { hypre_error_in_arg(1); return hypre_error_flag; } } hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixInitialize(HYPRE_IJMatrix matrix) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixInitializeParCSR(ijmatrix); } else { hypre_error_in_arg(1); } return hypre_error_flag; } HYPRE_Int HYPRE_IJMatrixInitialize_v2(HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixInitializeParCSR_v2(ijmatrix, memory_location); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetPrintLevel(HYPRE_IJMatrix matrix, HYPRE_Int print_level) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixPrintLevel(ijmatrix) = print_level; return hypre_error_flag; } /*-------------------------------------------------------------------------- * This is a helper routine to compute a prefix sum of integer values. * * The current implementation is okay for modest numbers of threads. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PrefixSumInt(HYPRE_Int nvals, HYPRE_Int * vals, HYPRE_Int * sums) { HYPRE_Int j, nthreads, bsize; nthreads = hypre_NumThreads(); bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the * remainder */ if (nvals < nthreads || bsize == 1) { sums[0] = 0; for (j = 1; j < nvals; j++) { sums[j] = sums[j - 1] + vals[j - 1]; } } else { /* * Compute preliminary partial sums (in parallel) within each * interval */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j + bsize), nvals); sums[j] = 0; for (i = j + 1; i < n; i++) { sums[i] = sums[i - 1] + vals[i - 1]; } } /* * Compute final partial sums (in serial) for the first entry of * every interval */ for (j = bsize; j < nvals; j += bsize) { sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1]; } /* Compute final partial sums (in parallel) for the remaining entries */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE #endif for (j = bsize; j < nvals; j += bsize) { HYPRE_Int i, n = hypre_min((j + bsize), nvals); for (i = j + 1; i < n; i++) { sums[i] += sums[j]; } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if
(hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetValues2(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_Int * row_indexes, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "set"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetConstantValues(HYPRE_IJMatrix matrix, HYPRE_Complex value) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { return (hypre_IJMatrixSetConstantValuesParCSR(ijmatrix, value)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return 
hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAddToValues2(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, const HYPRE_BigInt * rows, const HYPRE_Int * row_indexes, const HYPRE_BigInt * cols, const HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* * if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } */ if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(6); return hypre_error_flag; } if (!values) { hypre_error_in_arg(7); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR) { hypre_error_in_arg(1); return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)); if (exec == HYPRE_EXEC_DEVICE) { hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values, "add"); } else #endif { HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes; HYPRE_Int *ncols_tmp = ncols; if (!ncols_tmp) { HYPRE_Int i; ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); for (i = 0; i < nrows; i++) { ncols_tmp[i] = 1; } } if (!row_indexes) { row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST); hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp); } if (hypre_IJMatrixOMPFlag(ijmatrix)) { hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } else { hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values); } if (!ncols) { hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST); } if (!row_indexes) { hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST); } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAssemble(HYPRE_IJMatrix matrix) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)); if (exec == HYPRE_EXEC_DEVICE) { return (hypre_IJMatrixAssembleParCSRDevice(ijmatrix)); } else #endif { return (hypre_IJMatrixAssembleParCSR(ijmatrix)); } } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetRowCounts(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_BigInt * rows, HYPRE_Int * ncols) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if 
(nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; } if (!rows) { hypre_error_in_arg(3); return hypre_error_flag; } if (!ncols) { hypre_error_in_arg(4); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixGetRowCountsParCSR(ijmatrix, nrows, rows, ncols); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetValues(HYPRE_IJMatrix matrix, HYPRE_Int nrows, HYPRE_Int * ncols, HYPRE_BigInt * rows, HYPRE_BigInt * cols, HYPRE_Complex * values) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (nrows == 0) { return hypre_error_flag; } if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (!ncols) { hypre_error_in_arg(3); return hypre_error_flag; } if (!rows) { hypre_error_in_arg(4); return hypre_error_flag; } if (!cols) { hypre_error_in_arg(5); return hypre_error_flag; } if (!values) { hypre_error_in_arg(6); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixGetValuesParCSR(ijmatrix, nrows, ncols, rows, cols, values); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetObjectType(HYPRE_IJMatrix matrix, HYPRE_Int type) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixObjectType(ijmatrix) = type; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetObjectType(HYPRE_IJMatrix matrix, HYPRE_Int * type) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } *type = hypre_IJMatrixObjectType(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixGetLocalRange(HYPRE_IJMatrix matrix, HYPRE_BigInt * ilower, HYPRE_BigInt * iupper, HYPRE_BigInt * jlower, HYPRE_BigInt * jupper) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } row_partitioning = hypre_IJMatrixRowPartitioning(ijmatrix); col_partitioning = hypre_IJMatrixColPartitioning(ijmatrix); *ilower = row_partitioning[0]; *iupper = row_partitioning[1] - 1; *jlower = col_partitioning[0]; *jupper = col_partitioning[1] - 1; return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /** Returns a pointer to an underlying ijmatrix type used to implement IJMatrix. Assumes that the implementation has an underlying matrix, so it would not work with a direct implementation of IJMatrix. @return integer error code @param IJMatrix [IN] The ijmatrix to be pointed to. 
*/ HYPRE_Int HYPRE_IJMatrixGetObject(HYPRE_IJMatrix matrix, void **object) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } *object = hypre_IJMatrixObject(ijmatrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetRowSizes(HYPRE_IJMatrix matrix, const HYPRE_Int * sizes) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { return (hypre_IJMatrixSetRowSizesParCSR(ijmatrix, sizes)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetDiagOffdSizes(HYPRE_IJMatrix matrix, const HYPRE_Int * diag_sizes, const HYPRE_Int * offdiag_sizes) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixSetDiagOffdSizesParCSR(ijmatrix, diag_sizes, offdiag_sizes); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetMaxOffProcElmts(HYPRE_IJMatrix matrix, HYPRE_Int max_off_proc_elmts) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { return (hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix, max_off_proc_elmts)); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixRead * create IJMatrix on host memory *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixRead(const char *filename, MPI_Comm comm, HYPRE_Int type, HYPRE_IJMatrix * matrix_ptr) { HYPRE_IJMatrix matrix; HYPRE_BigInt ilower, iupper, jlower, jupper; HYPRE_BigInt I, J; HYPRE_Int ncols; HYPRE_Complex value; HYPRE_Int myid, ret; char new_filename[255]; FILE *file; hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper); HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix); HYPRE_IJMatrixSetObjectType(matrix, type); HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST); /* * It is important to ensure that whitespace follows the index value to * help catch mistakes in the input file. See comments in * IJVectorRead(). 
*/ ncols = 1; while ((ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF) { if (ret != 3) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file."); return hypre_error_flag; } if (I < ilower || I > iupper) { HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value); } else { HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value); } } HYPRE_IJMatrixAssemble(matrix); fclose(file); *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixPrint(HYPRE_IJMatrix matrix, const char *filename) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } if ((hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR)) { hypre_error_in_arg(1); return hypre_error_flag; } void *object; HYPRE_IJMatrixGetObject(matrix, &object); HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object; HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix); if (hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST) { hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename); } else { HYPRE_ParCSRMatrix par_csr2 = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixPrintIJ(par_csr2, 0, 0, filename); hypre_ParCSRMatrixDestroy(par_csr2); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixSetOMPFlag *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixSetOMPFlag(HYPRE_IJMatrix matrix, HYPRE_Int omp_flag) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixTranspose(HYPRE_IJMatrix matrix_A, HYPRE_IJMatrix * matrix_AT) { hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A; hypre_IJMatrix *ij_AT; HYPRE_Int i; if (!ij_A) { hypre_error_in_arg(1); return hypre_error_flag; } ij_AT = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ij_AT) = hypre_IJMatrixComm(ij_A); hypre_IJMatrixObject(ij_AT) = NULL; hypre_IJMatrixTranslator(ij_AT) = NULL; hypre_IJMatrixAssumedPart(ij_AT) = NULL; hypre_IJMatrixObjectType(ij_AT) = hypre_IJMatrixObjectType(ij_A); hypre_IJMatrixAssembleFlag(ij_AT) = 1; hypre_IJMatrixPrintLevel(ij_AT) = hypre_IJMatrixPrintLevel(ij_A); hypre_IJMatrixGlobalFirstRow(ij_AT) = hypre_IJMatrixGlobalFirstCol(ij_A); hypre_IJMatrixGlobalFirstCol(ij_AT) = hypre_IJMatrixGlobalFirstRow(ij_A); hypre_IJMatrixGlobalNumRows(ij_AT) = hypre_IJMatrixGlobalNumCols(ij_A); hypre_IJMatrixGlobalNumCols(ij_AT) = hypre_IJMatrixGlobalNumRows(ij_A); for (i = 0; i < 2; i++) { hypre_IJMatrixRowPartitioning(ij_AT)[i] = hypre_IJMatrixColPartitioning(ij_A)[i]; hypre_IJMatrixColPartitioning(ij_AT)[i] = hypre_IJMatrixRowPartitioning(ij_A)[i]; } if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR) { hypre_IJMatrixTransposeParCSR(ij_A, ij_AT); } else { hypre_error_in_arg(1); } *matrix_AT = (HYPRE_IJMatrix) ij_AT; return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixNorm * * TODO: Add other norms 
*--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixNorm(HYPRE_IJMatrix matrix, HYPRE_Real * norm) { hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix; if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; } if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR) { hypre_IJMatrixNormParCSR(ijmatrix, norm); } else { hypre_error_in_arg(1); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * HYPRE_IJMatrixAdd *--------------------------------------------------------------------------*/ HYPRE_Int HYPRE_IJMatrixAdd(HYPRE_Complex alpha, HYPRE_IJMatrix matrix_A, HYPRE_Complex beta, HYPRE_IJMatrix matrix_B, HYPRE_IJMatrix * matrix_C) { hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A; hypre_IJMatrix *ij_B = (hypre_IJMatrix *) matrix_B; hypre_IJMatrix *ij_C; HYPRE_BigInt *row_partitioning_A; HYPRE_BigInt *col_partitioning_A; HYPRE_BigInt *row_partitioning_B; HYPRE_BigInt *col_partitioning_B; HYPRE_Int i; if (!ij_A) { hypre_error_in_arg(1); return hypre_error_flag; } /* Check if A and B have the same row/col partitionings */ row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A); row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B); col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A); col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B); for (i = 0; i < 2; i++) { if (row_partitioning_A[i] != row_partitioning_B[i]) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Input matrices must have same row partitioning!"); return hypre_error_flag; } if (col_partitioning_A[i] != col_partitioning_B[i]) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Input matrices must have same col partitioning!"); return hypre_error_flag; } } ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST); hypre_IJMatrixComm(ij_C) = hypre_IJMatrixComm(ij_A); hypre_IJMatrixObject(ij_C) = NULL; hypre_IJMatrixTranslator(ij_C) = NULL; hypre_IJMatrixAssumedPart(ij_C) = NULL; hypre_IJMatrixObjectType(ij_C) = hypre_IJMatrixObjectType(ij_A); hypre_IJMatrixAssembleFlag(ij_C) = 1; hypre_IJMatrixPrintLevel(ij_C) = hypre_IJMatrixPrintLevel(ij_A); /* Copy row/col partitioning of A to C */ for (i = 0; i < 2; i++) { hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i]; hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i]; } if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR) { hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C); } else { hypre_error_in_arg(1); } *matrix_C = (HYPRE_IJMatrix) ij_C; return hypre_error_flag; }
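Editor's note (not a dataset row): the hypre cells above exercise the IJ matrix interface. A minimal host-side driver is sketched below to show the call sequence the interface expects, the same one HYPRE_IJMatrixRead itself follows: Create, SetObjectType, Initialize, SetValues, Assemble, GetObject. The 3x3 diagonal system, the value 2.0, and the name ij_matrix_demo are illustrative assumptions; error checking is elided.

#include <mpi.h>
#include "HYPRE.h"
#include "HYPRE_IJ_mv.h"

/* Sketch only: assemble a tiny diagonal ParCSR matrix on one rank. */
void ij_matrix_demo(MPI_Comm comm)
{
    HYPRE_IJMatrix A;
    void *object;

    HYPRE_IJMatrixCreate(comm, 0, 2, 0, 2, &A);   /* rows/cols 0..2 on this rank */
    HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR); /* must precede Initialize     */
    HYPRE_IJMatrixInitialize(A);

    for (HYPRE_BigInt i = 0; i < 3; i++)
    {
        HYPRE_Int ncols = 1;     /* one entry in this row */
        HYPRE_BigInt col = i;    /* diagonal position     */
        HYPRE_Complex val = 2.0; /* illustrative value    */
        HYPRE_IJMatrixSetValues(A, 1, &ncols, &i, &col, &val);
    }

    HYPRE_IJMatrixAssemble(A);           /* finalize before any solver use */
    HYPRE_IJMatrixGetObject(A, &object); /* underlying HYPRE_ParCSRMatrix  */
    (void) object;
    HYPRE_IJMatrixDestroy(A);
}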
omp3.c
// note not doing O0 below as to ensure we get tbaa // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // note not doing O0 below as to ensure we get tbaa // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi # include <stdlib.h> # include <stdio.h> void msg(double* in, int *len, unsigned int slen) { if (slen != 0) { #pragma omp parallel for firstprivate(slen) for (unsigned int i=0; i<slen; i++) { /* int L = len[i] / 2; __builtin_assume(L > 0); for(int j=0; j<L; j++) in[j*10+i] *= L; len[i] = 0; */ } } } void __enzyme_autodiff(void*, ...); int main ( int argc, char *argv[] ) { double array[200]; double darray[200]; int len[10] = {20}; int slen = 10; __enzyme_autodiff((void*)msg, &array, &darray, &len, slen); return 0; }
// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
#include <stdlib.h>
#include <stdio.h>
void msg(double *in, int *len, unsigned int slen) { if (slen != 0) { for (unsigned int i = 0; i < slen; i++) { /* * int L = len[i] / 2; __builtin_assume(L > 0); for(int j=0; j<L; * j++) in[j*10+i] *= L; len[i] = 0; */ } } } void __enzyme_autodiff(void *,...); int main(int argc, char *argv[]) { double array[200]; double darray[200]; int len[10] = {20}; int slen = 10; __enzyme_autodiff((void *)msg, &array, &darray, &len, slen); return 0; }
// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
#include <stdlib.h>
#include <stdio.h>
void msg(double *in, int *len, unsigned int slen) { if (slen != 0) {
#pragma omp parallel for firstprivate(slen)
for (unsigned int i = 0; i < slen; i++) { /* * int L = len[i] / 2; __builtin_assume(L > 0); for(int j=0; j<L; * j++) in[j*10+i] *= L; len[i] = 0; */ } } } void __enzyme_autodiff(void *,...); int main(int argc, char *argv[]) { double array[200]; double darray[200]; int len[10] = {20}; int slen = 10; __enzyme_autodiff((void *)msg, &array, &darray, &len, slen); return 0; }
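Editor's note on the omp3.c rows above: __enzyme_autodiff uses a shadow-argument convention in which each active pointer is immediately followed by its shadow (&array is the primal buffer, &darray receives the gradient), while scalars such as slen are passed once and treated as constants. The sketch below, assuming it is compiled through the same Enzyme pipeline as the RUN lines, shows the convention on a function small enough to check by hand; square_inplace and the seed values are illustrative, not part of the test suite.

#include <stdio.h>

void square_inplace(double *out, const double *in) { *out = in[0] * in[0]; }

void __enzyme_autodiff(void *, ...);

int main(void)
{
    double in = 3.0, d_in = 0.0;   /* gradient accumulates into d_in */
    double out = 0.0, d_out = 1.0; /* seed: d(result)/d(out) = 1     */
    /* arguments come in (primal, shadow) pairs for each pointer */
    __enzyme_autodiff((void *)square_inplace, &out, &d_out, &in, &d_in);
    printf("d/dx x^2 at x = 3 is %f\n", d_in); /* expect 6.0 */
    return 0;
}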
GB_unaryop__minv_int16_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_bool // op(A') function: GB_tran__minv_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_bool ( int16_t *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_bool // op(A') function: GB_tran__minv_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_bool ( int16_t *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int16_bool // op(A') function: GB_tran__minv_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 16) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 16) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int16_bool ( int16_t *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
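Editor's note: the kernel above is macro-generated; per element it performs the GB_CASTING step (bool to int16_t) followed by GB_OP. Expanded by hand, the inner loop reduces to the sketch below. The zero case of the stand-in reciprocal is an assumption for illustration only — GB_IMINV_SIGNED defines its own handling of x == 0 — and for bool inputs (0 or 1) only the x == 1 branch matters.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

/* Hand expansion of GB_unop__minv_int16_bool's inner loop (sketch only). */
static void unop_minv_int16_bool_sketch(int16_t *Cx, const bool *Ax, size_t anz)
{
    for (size_t p = 0; p < anz; p++)
    {
        int16_t z = (int16_t) Ax[p];  /* GB_CASTING: bool -> int16_t          */
        Cx[p] = (z == 0) ? 0 : 1 / z; /* stand-in for GB_IMINV_SIGNED (z, 16) */
    }
}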
solver.c
#include "ns/solver.h" #include "ns/config.h" #include <stdlib.h> #include <stdio.h> // Data wrapper typedef struct ns_t { // World uint64_t world_width; uint64_t world_width_bounds; uint64_t world_height; uint64_t world_height_bounds; // Fluid double viscosity; double density; double diffusion; // Time double time_step; // World data double **u; double **u_prev; double **v; double **v_prev; double **dense; double **dense_prev; } ns_t; /** * Private definitions */ static void ns_velocity_step(ns_t *ns); static void ns_density_step(ns_t *ns); static void ns_add_sources_to_targets(const ns_t *ns); static void ns_diffuse(const ns_t *ns, uint64_t bounds, double diffusion_value, double **target, const double **source); static void ns_project(ns_t *ns); static void ns_advect(const ns_t *ns, uint64_t bounds, double **d, double **d0, double **u, double **v); static void ns_set_bounds(const ns_t *ns, uint64_t bounds, double **target); static void ns_swap_matrix(double ***x, double ***y); static bool is_valid_coordinate(const ns_t *ns, uint64_t x, uint64_t y); /** * END Private definitions */ /** * Public */ ns_t *ns_create(uint64_t world_width, uint64_t world_height, double viscosity, double density, double diffusion, double time_step) { uint64_t i; bool error = false; ns_t *ns = NULL; ns = (ns_t *) malloc(sizeof(ns_t)); if (ns == NULL) return NULL; // World ns->world_width = world_width; ns->world_width_bounds = ns->world_width + 2; ns->world_height = world_height; ns->world_height_bounds = ns->world_height + 2; // Fluid ns->viscosity = viscosity; ns->density = density; ns->diffusion = diffusion; // Time ns->time_step = time_step; // Allocate world data ns->u = (double **) calloc(ns->world_height_bounds, sizeof(double *)); ns->u_prev = (double **) calloc(ns->world_height_bounds, sizeof(double *)); ns->v = (double **) calloc(ns->world_height_bounds, sizeof(double *)); ns->v_prev = (double **) calloc(ns->world_height_bounds, sizeof(double *)); ns->dense = (double **) calloc(ns->world_height_bounds, sizeof(double *)); ns->dense_prev = (double **) calloc(ns->world_height_bounds, sizeof(double *)); if (ns->u == NULL || ns->u_prev == NULL || ns->v == NULL || ns->v_prev == NULL || ns->dense == NULL || ns->dense_prev == NULL) { error = true; } if (!error) { #pragma omp parallel for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(i) shared(ns, error) for (i = 0; i < ns->world_height_bounds; ++i) { if (error) continue; ns->u[i] = (double *) calloc(ns->world_width_bounds, sizeof(double)); ns->u_prev[i] = (double *) calloc(ns->world_width_bounds, sizeof(double)); ns->v[i] = (double *) calloc(ns->world_width_bounds, sizeof(double)); ns->v_prev[i] = (double *) calloc(ns->world_width_bounds, sizeof(double)); ns->dense[i] = (double *) calloc(ns->world_width_bounds, sizeof(double)); ns->dense_prev[i] = (double *) calloc(ns->world_width_bounds, sizeof(double)); if (ns->u[i] == NULL || ns->u_prev[i] == NULL || ns->v[i] == NULL || ns->v_prev[i] == NULL || ns->dense[i] == NULL || ns->dense_prev[i] == NULL) { #pragma omp critical error = true; } } } if (error) { ns_free(ns); return NULL; } return ns; } void ns_free(ns_t *ns) { if (ns == NULL) return; uint64_t i; #pragma omp parallel for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(i) shared(ns) for (i = 0; i < ns->world_height_bounds; ++i) { if (ns->u != NULL) free(ns->u[i]); if (ns->u_prev != NULL) free(ns->u_prev[i]); if (ns->v != NULL) free(ns->v[i]); if (ns->v_prev != NULL) free(ns->v_prev[i]); if (ns->dense != NULL) free(ns->dense[i]); 
if (ns->dense_prev != NULL) free(ns->dense_prev[i]); } free(ns->u); free(ns->u_prev); free(ns->v); free(ns->v_prev); free(ns->dense); free(ns->dense_prev); free(ns); } void ns_tick(ns_t *ns) { ns_velocity_step(ns); ns_density_step(ns); } bool ns_increase_density(ns_t *ns, uint64_t x, uint64_t y) { bool status = false; // Fix due to bounds x += 1; y += 1; if (!is_valid_coordinate(ns, x, y)) fprintf(stderr, "Invalid increase_density coordinates {x: %ld, y: %ld}\n", x, y); else status = true; if (status) ns->dense[y][x] += ns->density; return status; } bool ns_apply_force(ns_t *ns, uint64_t x, uint64_t y, double v_x, double v_y) { bool status = false; // Fix due to bounds x += 1; y += 1; if (!is_valid_coordinate(ns, x, y)) fprintf(stderr, "Invalid apply_force coordinates {x: %ld, y: %ld}\n", x, y); else if (v_x > NS_MAX_FORCE_VELOCITY || v_y > NS_MAX_FORCE_VELOCITY) fprintf(stdout, "Invalid apply_force velocity {v_x: %lf, v_y: %lf}\n", v_x, v_y); else status = true; if (status) { ns->u[y][x] = v_x != 0 ? v_x : ns->u[y][x]; ns->v[y][x] = v_y != 0 ? v_y : ns->v[y][x]; } return status; } ns_world_t *ns_get_world(const ns_t *ns) { uint64_t i, x, y; ns_world_t *world = (ns_world_t *) malloc(sizeof(ns_world_t)); world->world_width = ns->world_width; world->world_width_bounds = ns->world_width_bounds; world->world_height = ns->world_height; world->world_height_bounds = ns->world_height_bounds; world->world = (ns_cell_t **) calloc(ns->world_height_bounds, sizeof(ns_cell_t *)); #pragma omp parallel \ default(none) private(i) shared(ns, world) { #pragma omp for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) for (i = 0; i < ns->world_height_bounds; ++i) world->world[i] = (ns_cell_t *) calloc(ns->world_width_bounds, sizeof(ns_cell_t)); #pragma omp for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) for (y = 0; y < ns->world_height_bounds; ++y) { for (x = 0; x < ns->world_width_bounds; ++x) { ns_cell_t cell; cell.u = &ns->u[y][x]; cell.v = &ns->v[y][x]; cell.density = &ns->dense[y][x]; world->world[y][x] = cell; } } } return world; } void ns_free_world(ns_world_t *world) { uint64_t i; #pragma omp parallel for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(i) shared(world) for (i = 0; i < world->world_height_bounds; ++i) { free(world->world[i]); } free(world->world); free(world); } /** * END Public */ /** * Private */ static void ns_velocity_step(ns_t *ns) { ns_add_sources_to_targets(ns); ns_swap_matrix(&ns->u_prev, &ns->u); ns_diffuse(ns, 1, ns->viscosity, ns->u, (const double **) ns->u_prev); ns_swap_matrix(&ns->v_prev, &ns->v); ns_diffuse(ns, 2, ns->viscosity, ns->v, (const double **) ns->v_prev); ns_project(ns); ns_swap_matrix(&ns->u_prev, &ns->u); ns_swap_matrix(&ns->v_prev, &ns->v); ns_advect(ns, 1, ns->u, ns->u_prev, ns->u_prev, ns->v_prev); ns_advect(ns, 2, ns->v, ns->v_prev, ns->u_prev, ns->v_prev); ns_project(ns); } static void ns_density_step(ns_t *ns) { ns_swap_matrix(&ns->dense_prev, &ns->dense); ns_diffuse(ns, 0, ns->diffusion, ns->dense, (const double **) ns->dense_prev); ns_swap_matrix(&ns->dense_prev, &ns->dense); ns_advect(ns, 0, ns->dense, ns->dense_prev, ns->u, ns->v); } static void ns_add_sources_to_targets(const ns_t *ns) { uint64_t x, y; #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x) shared(ns) for (y = 0; y < ns->world_height_bounds; ++y) { for (x = 0; x < ns->world_width_bounds; ++x) { ns->u[y][x] += ns->time_step * ns->u_prev[y][x]; ns->v[y][x] += ns->time_step * ns->v_prev[y][x]; } } } static void ns_diffuse(const ns_t 
*ns, uint64_t bounds, double diffusion_value, double **target, const double **source) { const double a = ns->time_step * diffusion_value * (double) ns->world_width * (double) ns->world_height; for (uint64_t k = 0; k < 20; k++) { for (uint64_t y = 1; y <= ns->world_height; ++y) { for (uint64_t x = 1; x <= ns->world_width; ++x) { target[y][x] = (source[y][x] + a * (target[y][x - 1] + target[y][x + 1] + target[y - 1][x] + target[y + 1][x])) / (1 + 4 * a); } } ns_set_bounds(ns, bounds, target); } } static void ns_project(ns_t *ns) { uint64_t x, y; double h = 1.0 / (double) ns->world_width; for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->v_prev[y][x] = -0.5 * h * (ns->u[y][x + 1] - ns->u[y][x - 1] + ns->v[y + 1][x] - ns->v[y - 1][x]); ns->u_prev[y][x] = 0; } } ns_set_bounds(ns, 0, ns->v_prev); ns_set_bounds(ns, 0, ns->u_prev); for (uint64_t k = 0; k < 20; k++) { for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->u_prev[y][x] = (ns->v_prev[y][x] + ns->u_prev[y][x - 1] + ns->u_prev[y][x + 1] + ns->u_prev[y - 1][x] + ns->u_prev[y + 1][x]) / 4; } } ns_set_bounds(ns, 0, ns->u_prev); } #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x) shared(ns, h) for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->u[y][x] -= 0.5 * (ns->u_prev[y][x + 1] - ns->u_prev[y][x - 1]) / h; ns->v[y][x] -= 0.5 * (ns->u_prev[y + 1][x] - ns->u_prev[y - 1][x]) / h; } } ns_set_bounds(ns, 1, ns->u); ns_set_bounds(ns, 2, ns->v); } static void ns_advect(const ns_t *ns, uint64_t bounds, double **d, double **d0, double **u, double **v) { uint64_t x, y, x0, x1, y0, y1; double xx, yy, s0, s1, t0, t1; double dt0_width = ns->time_step * (double) ns->world_width; double dt0_height = ns->time_step * (double) ns->world_height; #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x, yy, xx, x0, x1, y0, y1, s0, s1, t0, t1) shared(ns, dt0_width, dt0_height, u, v, d, d0) for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { xx = (double) x - dt0_width * u[y][x]; yy = (double) y - dt0_height * v[y][x]; // Check xx if (xx < 0.5) xx = 0.5; if (xx > (double) ns->world_width + 0.5) xx = (double) ns->world_width + 0.5; x0 = (uint64_t) xx; x1 = x0 + 1; // Check yy if (yy < 0.5) yy = 0.5; if (yy > (double) ns->world_height + 0.5) yy = (double) ns->world_height + 0.5; y0 = (uint64_t) yy; y1 = y0 + 1; s1 = xx - (double) x0; s0 = 1 - s1; t1 = yy - (double) y0; t0 = 1 - t1; d[y][x] = s0 * (t0 * d0[y0][x0] + t1 * d0[y1][x0]) + s1 * (t0 * d0[y0][x1] + t1 * d0[y1][x1]); } } ns_set_bounds(ns, bounds, d); } static void ns_set_bounds(const ns_t *ns, uint64_t bounds, double **target) { uint64_t y; uint64_t x; #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x) shared(ns, target, bounds) for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { target[y][0] = (bounds == 1) ? -target[y][1] : target[y][1]; target[y][ns->world_width + 1] = bounds == 1 ? -target[y][ns->world_width] : target[y][ns->world_width]; target[0][x] = bounds == 2 ? -target[1][x] : target[1][x]; target[ns->world_height + 1][x] = bounds == 2 ? 
-target[ns->world_height][x] : target[ns->world_height][x]; } } target[0][0] = 0.5 * (target[0][1] + target[1][0]); target[ns->world_height + 1][0] = 0.5 * (target[ns->world_height + 1][1] + target[ns->world_height][0]); target[0][ns->world_width + 1] = 0.5 * (target[0][ns->world_width] + target[1][ns->world_width + 1]); target[ns->world_height + 1][ns->world_width + 1] = 0.5 * (target[ns->world_height + 1][ns->world_width] + target[ns->world_height][ns->world_width + 1]); } static void ns_swap_matrix(double ***x, double ***y) { double **tmp = *x; *x = *y; *y = tmp; } static bool is_valid_coordinate(const ns_t *ns, uint64_t x, uint64_t y) { return x >= 0 && x < ns->world_width_bounds && y >= 0 && y < ns->world_height_bounds; } /** * END Private */
#include "ns/solver.h" #include "ns/config.h" #include <stdlib.h> #include <stdio.h> // Data wrapper typedef struct ns_t { //World uint64_t world_width; uint64_t world_width_bounds; uint64_t world_height; uint64_t world_height_bounds; //Fluid double viscosity; double density; double diffusion; //Time double time_step; //World data double **u; double **u_prev; double **v; double **v_prev; double **dense; double **dense_prev; } ns_t; /** * Private definitions */ static void ns_velocity_step(ns_t * ns); static void ns_density_step(ns_t * ns); static void ns_add_sources_to_targets(const ns_t * ns); static void ns_diffuse(const ns_t * ns, uint64_t bounds, double diffusion_value, double **target, const double **source); static void ns_project(ns_t * ns); static void ns_advect(const ns_t * ns, uint64_t bounds, double **d, double **d0, double **u, double **v); static void ns_set_bounds(const ns_t * ns, uint64_t bounds, double **target); static void ns_swap_matrix(double ***x, double ***y); static bool is_valid_coordinate(const ns_t * ns, uint64_t x, uint64_t y); /** * END Private definitions */ /** * Public */ ns_t * ns_create(uint64_t world_width, uint64_t world_height, double viscosity, double density, double diffusion, double time_step) { uint64_t i; bool error = false; ns_t *ns = NULL; ns = (ns_t *) malloc(sizeof(ns_t)); if (ns == NULL) return NULL; //World ns->world_width = world_width; ns->world_width_bounds = ns->world_width + 2; ns->world_height = world_height; ns->world_height_bounds = ns->world_height + 2; //Fluid ns->viscosity = viscosity; ns->density = density; ns->diffusion = diffusion; //Time ns->time_step = time_step; //Allocate world data ns->u = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->u_prev = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->v = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->v_prev = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->dense = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->dense_prev = (double **)calloc(ns->world_height_bounds, sizeof(double *)); if (ns->u == NULL || ns->u_prev == NULL || ns->v == NULL || ns->v_prev == NULL || ns->dense == NULL || ns->dense_prev == NULL) { error = true; } if (!error) { schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default (none) private(i) shared(ns, error) for (i = 0; i < ns->world_height_bounds; ++i) { if (error) continue; ns->u[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->u_prev[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->v[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->v_prev[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->dense[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->dense_prev[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); if (ns->u[i] == NULL || ns->u_prev[i] == NULL || ns->v[i] == NULL || ns->v_prev[i] == NULL || ns->dense[i] == NULL || ns->dense_prev[i] == NULL) { error = true; } } } if (error) { ns_free(ns); return NULL; } return ns; } void ns_free(ns_t * ns) { if (ns == NULL) return; uint64_t i; schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default (none) private(i) shared(ns) for (i = 0; i < ns->world_height_bounds; ++i) { if (ns->u != NULL) free(ns->u[i]); if (ns->u_prev != NULL) free(ns->u_prev[i]); if (ns->v != NULL) free(ns->v[i]); if (ns->v_prev != NULL) free(ns->v_prev[i]); if (ns->dense != NULL) free(ns->dense[i]); if (ns->dense_prev != NULL) free(ns->dense_prev[i]); } free(ns->u); 
free(ns->u_prev); free(ns->v); free(ns->v_prev); free(ns->dense); free(ns->dense_prev); free(ns); } void ns_tick(ns_t * ns) { ns_velocity_step(ns); ns_density_step(ns); } bool ns_increase_density(ns_t * ns, uint64_t x, uint64_t y) { bool status = false; //Fix due to bounds x += 1; y += 1; if (!is_valid_coordinate(ns, x, y)) fprintf(stderr, "Invalid increase_density coordinates {x: %ld, y: %ld}\n", x, y); else status = true; if (status) ns->dense[y][x] += ns->density; return status; } bool ns_apply_force(ns_t * ns, uint64_t x, uint64_t y, double v_x, double v_y) { bool status = false; //Fix due to bounds x += 1; y += 1; if (!is_valid_coordinate(ns, x, y)) fprintf(stderr, "Invalid apply_force coordinates {x: %ld, y: %ld}\n", x, y); else if (v_x > NS_MAX_FORCE_VELOCITY || v_y > NS_MAX_FORCE_VELOCITY) fprintf(stdout, "Invalid apply_force velocity {v_x: %lf, v_y: %lf}\n", v_x, v_y); else status = true; if (status) { ns->u[y][x] = v_x != 0 ? v_x : ns->u[y][x]; ns->v[y][x] = v_y != 0 ? v_y : ns->v[y][x]; } return status; } ns_world_t * ns_get_world(const ns_t * ns) { uint64_t i, x, y; ns_world_t *world = (ns_world_t *) malloc(sizeof(ns_world_t)); world->world_width = ns->world_width; world->world_width_bounds = ns->world_width_bounds; world->world_height = ns->world_height; world->world_height_bounds = ns->world_height_bounds; world->world = (ns_cell_t **) calloc(ns->world_height_bounds, sizeof(ns_cell_t *)); { for (i = 0; i < ns->world_height_bounds; ++i) world->world[i] = (ns_cell_t *) calloc(ns->world_width_bounds, sizeof(ns_cell_t)); for (y = 0; y < ns->world_height_bounds; ++y) { for (x = 0; x < ns->world_width_bounds; ++x) { ns_cell_t cell; cell.u = &ns->u[y][x]; cell.v = &ns->v[y][x]; cell.density = &ns->dense[y][x]; world->world[y][x] = cell; } } } return world; } void ns_free_world(ns_world_t * world) { uint64_t i; for (i = 0; i < world->world_height_bounds; ++i) { free(world->world[i]); } free(world->world); free(world); } /** * END Public */ /** * Private */ static void ns_velocity_step(ns_t * ns) { ns_add_sources_to_targets(ns); ns_swap_matrix(&ns->u_prev, &ns->u); ns_diffuse(ns, 1, ns->viscosity, ns->u, (const double **)ns->u_prev); ns_swap_matrix(&ns->v_prev, &ns->v); ns_diffuse(ns, 2, ns->viscosity, ns->v, (const double **)ns->v_prev); ns_project(ns); ns_swap_matrix(&ns->u_prev, &ns->u); ns_swap_matrix(&ns->v_prev, &ns->v); ns_advect(ns, 1, ns->u, ns->u_prev, ns->u_prev, ns->v_prev); ns_advect(ns, 2, ns->v, ns->v_prev, ns->u_prev, ns->v_prev); ns_project(ns); } static void ns_density_step(ns_t * ns) { ns_swap_matrix(&ns->dense_prev, &ns->dense); ns_diffuse(ns, 0, ns->diffusion, ns->dense, (const double **)ns->dense_prev); ns_swap_matrix(&ns->dense_prev, &ns->dense); ns_advect(ns, 0, ns->dense, ns->dense_prev, ns->u, ns->v); } static void ns_add_sources_to_targets(const ns_t * ns) { uint64_t x, y; for (y = 0; y < ns->world_height_bounds; ++y) { for (x = 0; x < ns->world_width_bounds; ++x) { ns->u[y][x] += ns->time_step * ns->u_prev[y][x]; ns->v[y][x] += ns->time_step * ns->v_prev[y][x]; } } } static void ns_diffuse(const ns_t * ns, uint64_t bounds, double diffusion_value, double **target, const double **source) { const double a = ns->time_step * diffusion_value * (double)ns->world_width * (double)ns->world_height; for
(uint64_t k = 0; k < 20; k++) { for (uint64_t y = 1; y <= ns->world_height; ++y) { for (uint64_t x = 1; x <= ns->world_width; ++x) { target[y][x] = (source[y][x] + a * (target[y][x - 1] + target[y][x + 1] + target[y - 1][x] + target[y + 1][x])) / (1 + 4 * a); } } ns_set_bounds(ns, bounds, target); } } static void ns_project(ns_t * ns) { uint64_t x, y; double h = 1.0 / (double)ns->world_width; for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->v_prev[y][x] = -0.5 * h * (ns->u[y][x + 1] - ns->u[y][x - 1] + ns->v[y + 1][x] - ns->v[y - 1][x]); ns->u_prev[y][x] = 0; } } ns_set_bounds(ns, 0, ns->v_prev); ns_set_bounds(ns, 0, ns->u_prev); for (uint64_t k = 0; k < 20; k++) { for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->u_prev[y][x] = (ns->v_prev[y][x] + ns->u_prev[y][x - 1] + ns->u_prev[y][x + 1] + ns->u_prev[y - 1][x] + ns->u_prev[y + 1][x]) / 4; } } ns_set_bounds(ns, 0, ns->u_prev); } for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->u[y][x] -= 0.5 * (ns->u_prev[y][x + 1] - ns->u_prev[y][x - 1]) / h; ns->v[y][x] -= 0.5 * (ns->u_prev[y + 1][x] - ns->u_prev[y - 1][x]) / h; } } ns_set_bounds(ns, 1, ns->u); ns_set_bounds(ns, 2, ns->v); } static void ns_advect(const ns_t * ns, uint64_t bounds, double **d, double **d0, double **u, double **v) { uint64_t x, y, x0, x1, y0, y1; double xx, yy, s0, s1, t0, t1; double dt0_width = ns->time_step * (double)ns->world_width; double dt0_height = ns->time_step * (double)ns->world_height; for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { xx = (double)x - dt0_width * u[y][x]; yy = (double)y - dt0_height * v[y][x]; //Check xx if (xx < 0.5) xx = 0.5; if (xx > (double)ns->world_width + 0.5) xx = (double)ns->world_width + 0.5; x0 = (uint64_t) xx; x1 = x0 + 1; //Check yy if (yy < 0.5) yy = 0.5; if (yy > (double)ns->world_height + 0.5) yy = (double)ns->world_height + 0.5; y0 = (uint64_t) yy; y1 = y0 + 1; s1 = xx - (double)x0; s0 = 1 - s1; t1 = yy - (double)y0; t0 = 1 - t1; d[y][x] = s0 * (t0 * d0[y0][x0] + t1 * d0[y1][x0]) + s1 * (t0 * d0[y0][x1] + t1 * d0[y1][x1]); } } ns_set_bounds(ns, bounds, d); } static void ns_set_bounds(const ns_t * ns, uint64_t bounds, double **target) { uint64_t y; uint64_t x; for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { target[y][0] = (bounds == 1) ? -target[y][1] : target[y][1]; target[y][ns->world_width + 1] = bounds == 1 ? -target[y][ns->world_width] : target[y][ns->world_width]; target[0][x] = bounds == 2 ? -target[1][x] : target[1][x]; target[ns->world_height + 1][x] = bounds == 2 ?
-target[ns->world_height][x] : target[ns->world_height][x]; } } target[0][0] = 0.5 * (target[0][1] + target[1][0]); target[ns->world_height + 1][0] = 0.5 * (target[ns->world_height + 1][1] + target[ns->world_height][0]); target[0][ns->world_width + 1] = 0.5 * (target[0][ns->world_width] + target[1][ns->world_width + 1]); target[ns->world_height + 1][ns->world_width + 1] = 0.5 * (target[ns->world_height + 1][ns->world_width] + target[ns->world_height][ns->world_width + 1]); } static void ns_swap_matrix(double ***x, double ***y) { double **tmp = *x; *x = *y; *y = tmp; } static bool is_valid_coordinate(const ns_t * ns, uint64_t x, uint64_t y) { return x >= 0 && x < ns->world_width_bounds && y >= 0 && y < ns->world_height_bounds; } /** * END Private */
#include "ns/solver.h" #include "ns/config.h" #include <stdlib.h> #include <stdio.h> // Data wrapper typedef struct ns_t { //World uint64_t world_width; uint64_t world_width_bounds; uint64_t world_height; uint64_t world_height_bounds; //Fluid double viscosity; double density; double diffusion; //Time double time_step; //World data double **u; double **u_prev; double **v; double **v_prev; double **dense; double **dense_prev; } ns_t; /** * Private definitions */ static void ns_velocity_step(ns_t * ns); static void ns_density_step(ns_t * ns); static void ns_add_sources_to_targets(const ns_t * ns); static void ns_diffuse(const ns_t * ns, uint64_t bounds, double diffusion_value, double **target, const double **source); static void ns_project(ns_t * ns); static void ns_advect(const ns_t * ns, uint64_t bounds, double **d, double **d0, double **u, double **v); static void ns_set_bounds(const ns_t * ns, uint64_t bounds, double **target); static void ns_swap_matrix(double ***x, double ***y); static bool is_valid_coordinate(const ns_t * ns, uint64_t x, uint64_t y); /** * END Private definitions */ /** * Public */ ns_t * ns_create(uint64_t world_width, uint64_t world_height, double viscosity, double density, double diffusion, double time_step) { uint64_t i; bool error = false; ns_t *ns = NULL; ns = (ns_t *) malloc(sizeof(ns_t)); if (ns == NULL) return NULL; //World ns->world_width = world_width; ns->world_width_bounds = ns->world_width + 2; ns->world_height = world_height; ns->world_height_bounds = ns->world_height + 2; //Fluid ns->viscosity = viscosity; ns->density = density; ns->diffusion = diffusion; //Time ns->time_step = time_step; //Allocate world data ns->u = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->u_prev = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->v = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->v_prev = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->dense = (double **)calloc(ns->world_height_bounds, sizeof(double *)); ns->dense_prev = (double **)calloc(ns->world_height_bounds, sizeof(double *)); if (ns->u == NULL || ns->u_prev == NULL || ns->v == NULL || ns->v_prev == NULL || ns->dense == NULL || ns->dense_prev == NULL) { error = true; } if (!error) { #pragma omp parallel for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(i) shared(ns, error) for (i = 0; i < ns->world_height_bounds; ++i) { if (error) continue; ns->u[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->u_prev[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->v[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->v_prev[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->dense[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); ns->dense_prev[i] = (double *)calloc(ns->world_width_bounds, sizeof(double)); if (ns->u[i] == NULL || ns->u_prev[i] == NULL || ns->v[i] == NULL || ns->v_prev[i] == NULL || ns->dense[i] == NULL || ns->dense_prev[i] == NULL) { #pragma omp critical error = true; } } } if (error) { ns_free(ns); return NULL; } return ns; } void ns_free(ns_t * ns) { if (ns == NULL) return; uint64_t i; #pragma omp parallel for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(i) shared(ns) for (i = 0; i < ns->world_height_bounds; ++i) { if (ns->u != NULL) free(ns->u[i]); if (ns->u_prev != NULL) free(ns->u_prev[i]); if (ns->v != NULL) free(ns->v[i]); if (ns->v_prev != NULL) free(ns->v_prev[i]); if (ns->dense != NULL) free(ns->dense[i]); if 
(ns->dense_prev != NULL) free(ns->dense_prev[i]); } free(ns->u); free(ns->u_prev); free(ns->v); free(ns->v_prev); free(ns->dense); free(ns->dense_prev); free(ns); } void ns_tick(ns_t * ns) { ns_velocity_step(ns); ns_density_step(ns); } bool ns_increase_density(ns_t * ns, uint64_t x, uint64_t y) { bool status = false; //Fix due to bounds x += 1; y += 1; if (!is_valid_coordinate(ns, x, y)) fprintf(stderr, "Invalid increase_density coordinates {x: %ld, y: %ld}\n", x, y); else status = true; if (status) ns->dense[y][x] += ns->density; return status; } bool ns_apply_force(ns_t * ns, uint64_t x, uint64_t y, double v_x, double v_y) { bool status = false; //Fix due to bounds x += 1; y += 1; if (!is_valid_coordinate(ns, x, y)) fprintf(stderr, "Invalid apply_force coordinates {x: %ld, y: %ld}\n", x, y); else if (v_x > NS_MAX_FORCE_VELOCITY || v_y > NS_MAX_FORCE_VELOCITY) fprintf(stdout, "Invalid apply_force velocity {v_x: %lf, v_y: %lf}\n", v_x, v_y); else status = true; if (status) { ns->u[y][x] = v_x != 0 ? v_x : ns->u[y][x]; ns->v[y][x] = v_y != 0 ? v_y : ns->v[y][x]; } return status; } ns_world_t * ns_get_world(const ns_t * ns) { uint64_t i, x, y; ns_world_t *world = (ns_world_t *) malloc(sizeof(ns_world_t)); world->world_width = ns->world_width; world->world_width_bounds = ns->world_width_bounds; world->world_height = ns->world_height; world->world_height_bounds = ns->world_height_bounds; world->world = (ns_cell_t **) calloc(ns->world_height_bounds, sizeof(ns_cell_t *)); #pragma omp parallel \ default(none) private(i) shared(ns, world) { #pragma omp for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) for (i = 0; i < ns->world_height_bounds; ++i) world->world[i] = (ns_cell_t *) calloc(ns->world_width_bounds, sizeof(ns_cell_t)); #pragma omp for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) for (y = 0; y < ns->world_height_bounds; ++y) { for (x = 0; x < ns->world_width_bounds; ++x) { ns_cell_t cell; cell.u = &ns->u[y][x]; cell.v = &ns->v[y][x]; cell.density = &ns->dense[y][x]; world->world[y][x] = cell; } } } return world; } void ns_free_world(ns_world_t * world) { uint64_t i; #pragma omp parallel for \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(i) shared(world) for (i = 0; i < world->world_height_bounds; ++i) { free(world->world[i]); } free(world->world); free(world); } /** * END Public */ /** * Private */ static void ns_velocity_step(ns_t * ns) { ns_add_sources_to_targets(ns); ns_swap_matrix(&ns->u_prev, &ns->u); ns_diffuse(ns, 1, ns->viscosity, ns->u, (const double **)ns->u_prev); ns_swap_matrix(&ns->v_prev, &ns->v); ns_diffuse(ns, 2, ns->viscosity, ns->v, (const double **)ns->v_prev); ns_project(ns); ns_swap_matrix(&ns->u_prev, &ns->u); ns_swap_matrix(&ns->v_prev, &ns->v); ns_advect(ns, 1, ns->u, ns->u_prev, ns->u_prev, ns->v_prev); ns_advect(ns, 2, ns->v, ns->v_prev, ns->u_prev, ns->v_prev); ns_project(ns); } static void ns_density_step(ns_t * ns) { ns_swap_matrix(&ns->dense_prev, &ns->dense); ns_diffuse(ns, 0, ns->diffusion, ns->dense, (const double **)ns->dense_prev); ns_swap_matrix(&ns->dense_prev, &ns->dense); ns_advect(ns, 0, ns->dense, ns->dense_prev, ns->u, ns->v); } static void ns_add_sources_to_targets(const ns_t * ns) { uint64_t x, y; #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x) shared(ns) for (y = 0; y < ns->world_height_bounds; ++y) { for (x = 0; x < ns->world_width_bounds; ++x) { ns->u[y][x] += ns->time_step * ns->u_prev[y][x]; ns->v[y][x] += ns->time_step * ns->v_prev[y][x]; } } } static void ns_diffuse(const 
ns_t * ns, uint64_t bounds, double diffusion_value, double **target, const double **source) { const double a = ns->time_step * diffusion_value * (double)ns->world_width * (double)ns->world_height; for (uint64_t k = 0; k < 20; k++) { for (uint64_t y = 1; y <= ns->world_height; ++y) { for (uint64_t x = 1; x <= ns->world_width; ++x) { target[y][x] = (source[y][x] + a * (target[y][x - 1] + target[y][x + 1] + target[y - 1][x] + target[y + 1][x])) / (1 + 4 * a); } } ns_set_bounds(ns, bounds, target); } } static void ns_project(ns_t * ns) { uint64_t x, y; double h = 1.0 / (double)ns->world_width; for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->v_prev[y][x] = -0.5 * h * (ns->u[y][x + 1] - ns->u[y][x - 1] + ns->v[y + 1][x] - ns->v[y - 1][x]); ns->u_prev[y][x] = 0; } } ns_set_bounds(ns, 0, ns->v_prev); ns_set_bounds(ns, 0, ns->u_prev); for (uint64_t k = 0; k < 20; k++) { for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->u_prev[y][x] = (ns->v_prev[y][x] + ns->u_prev[y][x - 1] + ns->u_prev[y][x + 1] + ns->u_prev[y - 1][x] + ns->u_prev[y + 1][x]) / 4; } } ns_set_bounds(ns, 0, ns->u_prev); } #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x) shared(ns, h) for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { ns->u[y][x] -= 0.5 * (ns->u_prev[y][x + 1] - ns->u_prev[y][x - 1]) / h; ns->v[y][x] -= 0.5 * (ns->u_prev[y + 1][x] - ns->u_prev[y - 1][x]) / h; } } ns_set_bounds(ns, 1, ns->u); ns_set_bounds(ns, 2, ns->v); } static void ns_advect(const ns_t * ns, uint64_t bounds, double **d, double **d0, double **u, double **v) { uint64_t x, y, x0, x1, y0, y1; double xx, yy, s0, s1, t0, t1; double dt0_width = ns->time_step * (double)ns->world_width; double dt0_height = ns->time_step * (double)ns->world_height; #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x, yy, xx, x0, x1, y0, y1, s0, s1, t0, t1) shared(ns, dt0_width, dt0_height, u, v, d, d0) for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { xx = (double)x - dt0_width * u[y][x]; yy = (double)y - dt0_height * v[y][x]; //Check xx if (xx < 0.5) xx = 0.5; if (xx > (double)ns->world_width + 0.5) xx = (double)ns->world_width + 0.5; x0 = (uint64_t) xx; x1 = x0 + 1; //Check yy if (yy < 0.5) yy = 0.5; if (yy > (double)ns->world_height + 0.5) yy = (double)ns->world_height + 0.5; y0 = (uint64_t) yy; y1 = y0 + 1; s1 = xx - (double)x0; s0 = 1 - s1; t1 = yy - (double)y0; t0 = 1 - t1; d[y][x] = s0 * (t0 * d0[y0][x0] + t1 * d0[y1][x0]) + s1 * (t0 * d0[y0][x1] + t1 * d0[y1][x1]); } } ns_set_bounds(ns, bounds, d); } static void ns_set_bounds(const ns_t * ns, uint64_t bounds, double **target) { uint64_t y; uint64_t x; #pragma omp parallel for collapse(2) \ schedule(DEFAULT_OPEN_MP_SCHEDULE) \ default(none) private(y, x) shared(ns, target, bounds) for (y = 1; y <= ns->world_height; ++y) { for (x = 1; x <= ns->world_width; ++x) { target[y][0] = (bounds == 1) ? -target[y][1] : target[y][1]; target[y][ns->world_width + 1] = bounds == 1 ? -target[y][ns->world_width] : target[y][ns->world_width]; target[0][x] = bounds == 2 ? -target[1][x] : target[1][x]; target[ns->world_height + 1][x] = bounds == 2 ? 
-target[ns->world_height][x] : target[ns->world_height][x]; } } target[0][0] = 0.5 * (target[0][1] + target[1][0]); target[ns->world_height + 1][0] = 0.5 * (target[ns->world_height + 1][1] + target[ns->world_height][0]); target[0][ns->world_width + 1] = 0.5 * (target[0][ns->world_width] + target[1][ns->world_width + 1]); target[ns->world_height + 1][ns->world_width + 1] = 0.5 * (target[ns->world_height + 1][ns->world_width] + target[ns->world_height][ns->world_width + 1]); } static void ns_swap_matrix(double ***x, double ***y) { double **tmp = *x; *x = *y; *y = tmp; } static bool is_valid_coordinate(const ns_t * ns, uint64_t x, uint64_t y) { return x >= 0 && x < ns->world_width_bounds && y >= 0 && y < ns->world_height_bounds; } /** * END Private */
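For orientation, the whole public surface of this solver is ns_create / ns_tick, the two injection calls, ns_get_world, and the free functions. A minimal driver, sketched here with an arbitrary 64x64 grid and made-up fluid constants (none of these numbers come from the source), could look like:

```c
#include "ns/solver.h"
#include <stdio.h>

int main(void)
{
    /* All numeric arguments are illustrative assumptions, not library defaults. */
    ns_t *ns = ns_create(64, 64, 0.0001, 100.0, 0.0001, 0.016);
    if (ns == NULL)
        return 1;

    ns_increase_density(ns, 32, 32);      /* inject dye near the center */
    ns_apply_force(ns, 32, 32, 1.0, 0.0); /* push it to the right       */

    for (int step = 0; step < 100; ++step)
        ns_tick(ns);

    ns_world_t *world = ns_get_world(ns);
    /* ns_world_t exposes the padded grid, so user coordinate (32, 32)
     * lands at world->world[33][33]. */
    printf("density at center: %f\n", *world->world[33][33].density);
    ns_free_world(world);
    ns_free(ns);
    return 0;
}
```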
GB_unop__identity_uint32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint32_fp32) // op(A') function: GB (_unop_tran__identity_uint32_fp32) // C type: uint32_t // A type: float // cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint32_fp32) ( uint32_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint32_fp32) // op(A') function: GB (_unop_tran__identity_uint32_fp32) // C type: uint32_t // A type: float // cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint32_fp32) ( uint32_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
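Macros aside, the generated `_unop_apply` kernel above boils down to one independent cast per entry, which is why a bare `parallel for` with a static schedule is all the parallelism it needs. As a rough freestanding analogue only: `cast_to_uint32` below merely stands in for `GB_cast_to_uint32_t`, whose authoritative NaN/overflow handling is defined inside GraphBLAS, not here.

```c
#include <stdint.h>
#include <math.h>

/* Stand-in for GB_cast_to_uint32_t: assumed to map NaN to 0 and clamp to
 * [0, UINT32_MAX]; the real semantics live in GraphBLAS itself. */
static uint32_t cast_to_uint32(double x)
{
    if (isnan(x) || x < 0.0) return 0;
    if (x >= (double) UINT32_MAX) return UINT32_MAX;
    return (uint32_t) x;
}

/* Simplified analogue of the dense (Ab == NULL) branch of _unop_apply. */
void apply_identity_uint32_fp32(uint32_t *Cx, const float *Ax, int64_t anz)
{
#pragma omp parallel for schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        Cx[p] = cast_to_uint32((double) Ax[p]);
    }
}
```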
updater_basemaker-inl.h
/*!
 * Copyright 2014 by Contributors
 * \file updater_basemaker-inl.h
 * \brief implement a common tree constructor
 * \author Tianqi Chen
 */
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>

#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>

#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operations
 *  needed in tree making
 */
class BaseMaker: public TreeUpdater {
 public:
  void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.InitAllowUnknown(args);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceeds query bound";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    inline bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid * 2 + 1];
    }
    inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
      std::vector<bst_uint> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief helper function for row based data */
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
        << "TreeMaker: can only grow new tree";
    const std::vector<unsigned> &root_index = fmat.Info().root_index_;
    {
      // setup position
      position_.resize(gpair.size());
      if (root_index.size() == 0) {
        std::fill(position_.begin(), position_.end(), 0);
      } else {
        for (size_t i = 0; i < position_.size(); ++i) {
          position_[i] = root_index[i];
          CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
              << "root index exceeds setting";
        }
      }
      // mark delete for the deleted data
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample
      if (param_.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query
      qexpand_.reserve(256);
      qexpand_.clear();
      for (int i = 0; i < tree.param.num_roots; ++i) {
        qexpand_.push_back(i);
      }
      this->UpdateNode2WorkIndex(tree);
    }
  }
  /*! \brief update the expand queue, adding in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the position value for ridx, preserving the inactive (~) flag
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief helper function that uses the column based data structure to
   *  reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark them as ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief helper function that uses the column based data structure to
   *  CORRECT the positions of non-default directions that WERE set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The sorted set of feature indices that contain split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();
          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief helper function that collects the sorted, de-duplicated set of
   *  split feature indices used by the given nodes
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief helper function that uses the column based data structure to
   *  update all positions into the nondefault branch, if any; the default
   *  branch is ignored
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat, const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        #pragma omp parallel for schedule(static)
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    const MetaInfo &info = fmat.Info();
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    #pragma omp parallel
    {
      const int tid = omp_get_thread_num();
      thread_temp[tid].resize(tree.param.num_nodes, TStats(param_));
      for (unsigned int nid : qexpand_) {
        thread_temp[tid][nid].Clear();
      }
    }
    // setup position
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
    #pragma omp parallel for schedule(static)
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair, info, ridx);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s.Clear();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief next cumulative-weight goal at which to record an entry */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      next_goal = -1.0f;
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", stemp.size=" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", next_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        wmin += w;
      }
    }
    /*! \brief push final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 ||
          last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", stemp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to its working index offset in qexpand,
   *  can be -1, which means the node is not actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *  can be negative, which means this position is no longer expanding
   *  see also Decode/EncodePosition
   */
  std::vector<int> position_;

 private:
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
}  // namespace tree
}  // namespace xgboost
#endif  // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
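One detail worth calling out from the listing above: position_ doubles as an active/inactive flag by storing the bitwise complement ~nid for rows that should be skipped (always negative), and DecodePosition recovers the node id with a second complement. A tiny round-trip check of that encoding:

```c
#include <assert.h>

int main(void)
{
    int nid = 5;
    int encoded = ~nid;                             /* mark row inactive at node 5 */
    assert(encoded < 0);                            /* inactive entries are negative */
    int decoded = encoded < 0 ? ~encoded : encoded; /* the DecodePosition logic */
    assert(decoded == 5);                           /* the node id survives */
    return 0;
}
```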
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_

#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>

#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>

#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"

namespace xgboost {
namespace tree {
/*!
 * \brief base tree maker class that defines common operations
 *  needed in tree making
 */
class BaseMaker : public TreeUpdater {
 public:
  void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
    param_.InitAllowUnknown(args);
  }

 protected:
  // helper to collect and query feature meta information
  struct FMetaHelper {
   public:
    /*! \brief find type of each feature, use column format */
    inline void InitByCol(DMatrix* p_fmat, const RegTree& tree) {
      fminmax_.resize(tree.param.num_feature * 2);
      std::fill(fminmax_.begin(), fminmax_.end(),
                -std::numeric_limits<bst_float>::max());
      // start accumulating statistics
      for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
        for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
          auto c = batch[fid];
          if (c.size() != 0) {
            fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
            fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
          }
        }
      }
    }
    /*! \brief synchronize the information */
    inline void SyncInfo() {
      rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
    }
    // get feature type, 0:empty 1:binary 2:real
    inline int Type(bst_uint fid) const {
      CHECK_LT(fid * 2 + 1, fminmax_.size())
          << "FeatHelper fid exceeds query bound";
      bst_float a = fminmax_[fid * 2];
      bst_float b = fminmax_[fid * 2 + 1];
      if (a == -std::numeric_limits<bst_float>::max()) return 0;
      if (-a == b) {
        return 1;
      } else {
        return 2;
      }
    }
    inline bst_float MaxValue(bst_uint fid) const {
      return fminmax_[fid * 2 + 1];
    }
    inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
      std::vector<bst_uint> &findex = *p_findex;
      findex.clear();
      for (size_t i = 0; i < fminmax_.size(); i += 2) {
        const auto fid = static_cast<bst_uint>(i / 2);
        if (this->Type(fid) != 0) findex.push_back(fid);
      }
      auto n = static_cast<unsigned>(p * findex.size());
      std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
      findex.resize(n);
      // sync the findex if it is subsample
      std::string s_cache;
      common::MemoryBufferStream fc(&s_cache);
      dmlc::Stream& fs = fc;
      if (rabit::GetRank() == 0) {
        fs.Write(findex);
      }
      rabit::Broadcast(&s_cache, 0);
      fs.Read(&findex);
    }

   private:
    std::vector<bst_float> fminmax_;
  };
  // ------static helper functions ------
  // helper function to get to next level of the tree
  /*! \brief helper function for row based data */
  inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
    const RegTree::Node &n = tree[nid];
    bst_uint findex = n.SplitIndex();
    for (const auto& ins : inst) {
      if (findex == ins.index) {
        if (ins.fvalue < n.SplitCond()) {
          return n.LeftChild();
        } else {
          return n.RightChild();
        }
      }
    }
    return n.DefaultChild();
  }
  // ------class member helpers---------
  /*! \brief initialize temp data structure */
  inline void InitData(const std::vector<GradientPair> &gpair,
                       const DMatrix &fmat,
                       const RegTree &tree) {
    CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
        << "TreeMaker: can only grow new tree";
    const std::vector<unsigned> &root_index = fmat.Info().root_index_;
    {
      // setup position
      position_.resize(gpair.size());
      if (root_index.size() == 0) {
        std::fill(position_.begin(), position_.end(), 0);
      } else {
        for (size_t i = 0; i < position_.size(); ++i) {
          position_[i] = root_index[i];
          CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
              << "root index exceeds setting";
        }
      }
      // mark delete for the deleted data
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
      }
      // mark subsample
      if (param_.subsample < 1.0f) {
        std::bernoulli_distribution coin_flip(param_.subsample);
        auto& rnd = common::GlobalRandom();
        for (size_t i = 0; i < position_.size(); ++i) {
          if (gpair[i].GetHess() < 0.0f) continue;
          if (!coin_flip(rnd)) position_[i] = ~position_[i];
        }
      }
    }
    {
      // expand query
      qexpand_.reserve(256);
      qexpand_.clear();
      for (int i = 0; i < tree.param.num_roots; ++i) {
        qexpand_.push_back(i);
      }
      this->UpdateNode2WorkIndex(tree);
    }
  }
  /*! \brief update the expand queue, adding in new leaves */
  inline void UpdateQueueExpand(const RegTree &tree) {
    std::vector<int> newnodes;
    for (int nid : qexpand_) {
      if (!tree[nid].IsLeaf()) {
        newnodes.push_back(tree[nid].LeftChild());
        newnodes.push_back(tree[nid].RightChild());
      }
    }
    // use new nodes for qexpand
    qexpand_ = newnodes;
    this->UpdateNode2WorkIndex(tree);
  }
  // return decoded position
  inline int DecodePosition(bst_uint ridx) const {
    const int pid = position_[ridx];
    return pid < 0 ? ~pid : pid;
  }
  // encode the position value for ridx, preserving the inactive (~) flag
  inline void SetEncodePosition(bst_uint ridx, int nid) {
    if (position_[ridx] < 0) {
      position_[ridx] = ~nid;
    } else {
      position_[ridx] = nid;
    }
  }
  /*!
   * \brief helper function that uses the column based data structure to
   *  reset the positions to the latest one
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) {
    // set the positions in the nondefault
    this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
    this->SetDefaultPostion(p_fmat, tree);
  }
  /*!
   * \brief helper function to set the non-leaf positions to default direction.
   *  This function can be applied multiple times and will get the same result.
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) {
    // set default direct nodes to default
    // for leaf nodes that are not fresh, mark them as ~nid,
    // so that they are ignored in future statistics collection
    const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = this->DecodePosition(ridx);
      if (tree[nid].IsLeaf()) {
        // mark finish when it is not a fresh leaf
        if (tree[nid].RightChild() == -1) {
          position_[ridx] = ~nid;
        }
      } else {
        // push to default branch
        if (tree[nid].DefaultLeft()) {
          this->SetEncodePosition(ridx, tree[nid].LeftChild());
        } else {
          this->SetEncodePosition(ridx, tree[nid].RightChild());
        }
      }
    }
  }
  /*!
   * \brief helper function that uses the column based data structure to
   *  CORRECT the positions of non-default directions that WERE set to default
   *  before calling this function.
   * \param batch The column batch
   * \param sorted_split_set The sorted set of feature indices that contain split solutions.
   * \param tree the regression tree structure
   */
  inline void CorrectNonDefaultPositionByBatch(
      const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
      const RegTree &tree) {
    for (size_t fid = 0; fid < batch.Size(); ++fid) {
      auto col = batch[fid];
      auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
      if (it != sorted_split_set.end() && *it == fid) {
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          CHECK(tree[nid].IsLeaf());
          int pid = tree[nid].Parent();
          // go back to parent, correct those who are not default
          if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
            if (fvalue < tree[pid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[pid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[pid].RightChild());
            }
          }
        }
      }
    }
  }
  /*!
   * \brief helper function that collects the sorted, de-duplicated set of
   *  split feature indices used by the given nodes
   * \param nodes the set of nodes that contains the split to be used
   * \param tree the regression tree structure
   * \param out_split_set The split index set
   */
  inline void GetSplitSet(const std::vector<int> &nodes,
                          const RegTree &tree,
                          std::vector<unsigned>* out_split_set) {
    std::vector<unsigned>& fsplits = *out_split_set;
    fsplits.clear();
    // step 1, classify the non-default data into right places
    for (int nid : nodes) {
      if (!tree[nid].IsLeaf()) {
        fsplits.push_back(tree[nid].SplitIndex());
      }
    }
    std::sort(fsplits.begin(), fsplits.end());
    fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
  }
  /*!
   * \brief helper function that uses the column based data structure to
   *  update all positions into the nondefault branch, if any; the default
   *  branch is ignored
   * \param nodes the set of nodes that contains the split to be used
   * \param p_fmat feature matrix needed for tree construction
   * \param tree the regression tree structure
   */
  virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                        DMatrix *p_fmat, const RegTree &tree) {
    std::vector<unsigned> fsplits;
    this->GetSplitSet(nodes, tree, &fsplits);
    for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
      for (auto fid : fsplits) {
        auto col = batch[fid];
        const auto ndata = static_cast<bst_omp_uint>(col.size());
        for (bst_omp_uint j = 0; j < ndata; ++j) {
          const bst_uint ridx = col[j].index;
          const bst_float fvalue = col[j].fvalue;
          const int nid = this->DecodePosition(ridx);
          // go back to parent, correct those who are not default
          if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
            if (fvalue < tree[nid].SplitCond()) {
              this->SetEncodePosition(ridx, tree[nid].LeftChild());
            } else {
              this->SetEncodePosition(ridx, tree[nid].RightChild());
            }
          }
        }
      }
    }
  }
  /*! \brief helper function to get statistics from a tree */
  template<typename TStats>
  inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                           const DMatrix &fmat,
                           const RegTree &tree,
                           std::vector< std::vector<TStats> > *p_thread_temp,
                           std::vector<TStats> *p_node_stats) {
    std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
    const MetaInfo &info = fmat.Info();
    thread_temp.resize(omp_get_max_threads());
    p_node_stats->resize(tree.param.num_nodes);
    // without the parallel pragmas every call runs on thread 0's row
    const int tid = omp_get_thread_num();
    thread_temp[tid].resize(tree.param.num_nodes, TStats(param_));
    for (unsigned int nid : qexpand_) {
      thread_temp[tid][nid].Clear();
    }
    // setup position
    const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
    for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
      const int nid = position_[ridx];
      const int tid = omp_get_thread_num();
      if (nid >= 0) {
        thread_temp[tid][nid].Add(gpair, info, ridx);
      }
    }
    // sum the per thread statistics together
    for (int nid : qexpand_) {
      TStats &s = (*p_node_stats)[nid];
      s.Clear();
      for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
        s.Add(thread_temp[tid][nid]);
      }
    }
  }
  /*! \brief common helper data structure to build sketch */
  struct SketchEntry {
    /*! \brief total sum of amount to be met */
    double sum_total;
    /*! \brief statistics used in the sketch */
    double rmin, wmin;
    /*! \brief last seen feature value */
    bst_float last_fvalue;
    /*! \brief next cumulative-weight goal at which to record an entry */
    double next_goal;
    // pointer to the sketch to put things in
    common::WXQuantileSketch<bst_float, bst_float> *sketch;
    // initialize the space
    inline void Init(unsigned max_size) {
      next_goal = -1.0f;
      rmin = wmin = 0.0f;
      sketch->temp.Reserve(max_size + 1);
      sketch->temp.size = 0;
    }
    /*!
     * \brief push a new element to sketch
     * \param fvalue feature value, comes in sorted ascending order
     * \param w weight
     * \param max_size
     */
    inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
      if (next_goal == -1.0f) {
        next_goal = 0.0f;
        last_fvalue = fvalue;
        wmin = w;
        return;
      }
      if (last_fvalue != fvalue) {
        double rmax = rmin + wmin;
        if (rmax >= next_goal && sketch->temp.size != max_size) {
          if (sketch->temp.size == 0 ||
              last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) {
            // push to sketch
            sketch->temp.data[sketch->temp.size] =
                common::WXQuantileSketch<bst_float, bst_float>::
                Entry(static_cast<bst_float>(rmin),
                      static_cast<bst_float>(rmax),
                      static_cast<bst_float>(wmin), last_fvalue);
            CHECK_LT(sketch->temp.size, max_size)
                << "invalid maximum size max_size=" << max_size
                << ", stemp.size=" << sketch->temp.size;
            ++sketch->temp.size;
          }
          if (sketch->temp.size == max_size) {
            next_goal = sum_total * 2.0f + 1e-5f;
          } else {
            next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
          }
        } else {
          if (rmax >= next_goal) {
            LOG(TRACKER) << "INFO: rmax=" << rmax
                         << ", sum_total=" << sum_total
                         << ", next_goal=" << next_goal
                         << ", size=" << sketch->temp.size;
          }
        }
        rmin = rmax;
        wmin = w;
        last_fvalue = fvalue;
      } else {
        wmin += w;
      }
    }
    /*! \brief push final unfinished value to the sketch */
    inline void Finalize(unsigned max_size) {
      double rmax = rmin + wmin;
      if (sketch->temp.size == 0 ||
          last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) {
        CHECK_LE(sketch->temp.size, max_size)
            << "Finalize: invalid maximum size, max_size=" << max_size
            << ", stemp.size=" << sketch->temp.size;
        // push to sketch
        sketch->temp.data[sketch->temp.size] =
            common::WXQuantileSketch<bst_float, bst_float>::
            Entry(static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
        ++sketch->temp.size;
      }
      sketch->PushTemp();
    }
  };
  /*! \brief training parameter of tree grower */
  TrainParam param_;
  /*! \brief queue of nodes to be expanded */
  std::vector<int> qexpand_;
  /*!
   * \brief map active node to its working index offset in qexpand,
   *  can be -1, which means the node is not actively expanding
   */
  std::vector<int> node2workindex_;
  /*!
   * \brief position of each instance in the tree
   *  can be negative, which means this position is no longer expanding
   *  see also Decode/EncodePosition
   */
  std::vector<int> position_;

 private:
  inline void UpdateNode2WorkIndex(const RegTree &tree) {
    // update the node2workindex
    std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
    node2workindex_.resize(tree.param.num_nodes);
    for (size_t i = 0; i < qexpand_.size(); ++i) {
      node2workindex_[qexpand_[i]] = static_cast<int>(i);
    }
  }
};
}  // namespace tree
}  // namespace xgboost
#endif  // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
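The serial variant above simply drops the pragmas; in the pragma-carrying original, GetNodeStats uses the classic OpenMP pattern of one scratch accumulator per thread indexed by omp_get_thread_num(), accumulated without locks and then reduced serially. A self-contained sketch of that pattern (toy data and illustrative names only, not xgboost code):

```c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    enum { NUM_NODES = 4, NUM_ROWS = 8 };
    const int position[NUM_ROWS] = {0, 1, 1, 2, 0, 3, 2, 1}; /* toy data */
    int nthread = omp_get_max_threads();
    /* one accumulator row per thread, so the hot loop needs no locks */
    int *thread_temp = calloc((size_t) nthread * NUM_NODES, sizeof(int));
    if (thread_temp == NULL)
        return 1;
#pragma omp parallel for schedule(static)
    for (int i = 0; i < NUM_ROWS; ++i) {
        thread_temp[omp_get_thread_num() * NUM_NODES + position[i]] += 1;
    }
    /* serial reduction over the per-thread rows */
    int node_stats[NUM_NODES] = {0};
    for (int t = 0; t < nthread; ++t)
        for (int nid = 0; nid < NUM_NODES; ++nid)
            node_stats[nid] += thread_temp[t * NUM_NODES + nid];
    for (int nid = 0; nid < NUM_NODES; ++nid)
        printf("node %d: %d\n", nid, node_stats[nid]);
    free(thread_temp);
    return 0;
}
```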
displacement_lagrangemultiplier_residual_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierResidualContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierResidualContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor (parameters) * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType 
RotRatioTolerance, const TDataType RotAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // The displacement residual mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation residual mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; // The normal contact residual mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } //* Copy constructor. DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm) ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm) ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm) { } /// Destructor. ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0; IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType residual_dof_value = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Auxiliar displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot; // Loop over Dofs #pragma omp parallel for firstprivate(dof_id, residual_dof_value) reduction(+:disp_residual_solution_norm,rot_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,rot_dof_num,lm_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id] == 1) { residual_dof_value = rb[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_residual_solution_norm += std::pow(residual_dof_value, 2); ++lm_dof_num; } else if ((*p_check_disp)(r_curr_var)) { disp_residual_solution_norm += std::pow(residual_dof_value, 2); ++disp_dof_num; } else { // We will assume is rotation dof KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; rot_residual_solution_norm += std::pow(residual_dof_value, 2); ++rot_dof_num; } } } } mDispCurrentResidualNorm = disp_residual_solution_norm; mRotCurrentResidualNorm = rot_residual_solution_norm; mLMCurrentResidualNorm = lm_residual_solution_norm; TDataType residual_disp_ratio = 1.0; TDataType residual_rot_ratio = 1.0; TDataType residual_lm_ratio = 1.0; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm; mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm; residual_disp_ratio = 1.0; residual_lm_ratio = 1.0; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 
1.0 : rot_residual_solution_norm; residual_rot_ratio = 1.0; } mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; // We calculate the ratio of the rotations residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm; // We calculate the ratio of the LM residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; // We calculate the absolute norms const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; const TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num; const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance; } else { Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance; } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << 
"\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio; r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true; const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance); if (disp_converged && rot_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) Table << BOLDFONT(FGRN(" Achieved")); else Table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. 
(unused) */ void Initialize( ModelPart& rModelPart) override { BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof (before building the table header, so the columns match the values streamed later) mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flag mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPCs exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This method provides the default parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_lagrangemultiplier_residual_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9, "contact_residual_relative_tolerance" : 1.0e-4, "contact_residual_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_lagrangemultiplier_residual_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that
are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation residual mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // The contact residual mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM residual TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM residual TDataType mLMInitialResidualNorm; /// The reference norm of the LM residual TDataType mLMCurrentResidualNorm; /// The current norm of the LM residual std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags
DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
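The convergence test implemented by PostCriteria above reduces to a simple pattern per DoF family (displacement, rotation, Lagrange multiplier): accumulate the squared residual entries, freeze the first accumulation as the reference norm, then pass if either the ratio against that reference or the per-DoF absolute value falls below its tolerance. A minimal standalone sketch of that check follows; the names (ResidualCheck, Converged) are illustrative only and not part of the header above.

// Minimal sketch of the ratio/absolute residual test used by PostCriteria.
// Mirrors the header's use of squared 2-norms (std::pow(value, 2), no sqrt),
// so the tolerances effectively act on squared quantities.
#include <cstddef>
#include <vector>

struct ResidualCheck {
    double ratio_tol;          // e.g. 1.0e-4, cf. "residual_relative_tolerance"
    double abs_tol;            // e.g. 1.0e-9, cf. "residual_absolute_tolerance"
    double initial_norm = 1.0; // reference norm, frozen on the first call

    // Assumes a non-empty residual vector; either criterion suffices.
    bool Converged(const std::vector<double>& residual, const bool first_iteration) {
        double norm = 0.0;
        for (const double r : residual)
            norm += r * r;                              // squared residual norm
        if (first_iteration)
            initial_norm = (norm == 0.0) ? 1.0 : norm;  // avoid division by zero
        const double ratio = norm / initial_norm;
        const double abs_per_dof = norm / static_cast<double>(residual.size());
        return ratio <= ratio_tol || abs_per_dof <= abs_tol;
    }
};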
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierResidualContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierResidualContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor (parameters) * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType 
RotRatioTolerance, const TDataType RotAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // The displacement residual mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation residual mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; // The normal contact residual mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } //* Copy constructor. DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm) ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm) ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm) { } /// Destructor. ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0; IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType residual_dof_value = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Auxiliar displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot; // Loop over Dofs for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id] == 1) { residual_dof_value = rb[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_residual_solution_norm += std::pow(residual_dof_value, 2); ++lm_dof_num; } else if ((*p_check_disp)(r_curr_var)) { disp_residual_solution_norm += std::pow(residual_dof_value, 2); ++disp_dof_num; } else { // We will assume is rotation dof KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; rot_residual_solution_norm += std::pow(residual_dof_value, 2); ++rot_dof_num; } } } } mDispCurrentResidualNorm = disp_residual_solution_norm; mRotCurrentResidualNorm = rot_residual_solution_norm; mLMCurrentResidualNorm = lm_residual_solution_norm; TDataType residual_disp_ratio = 1.0; TDataType residual_rot_ratio = 1.0; TDataType residual_lm_ratio = 1.0; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm; mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm; residual_disp_ratio = 1.0; residual_lm_ratio = 1.0; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 
1.0 : rot_residual_solution_norm; residual_rot_ratio = 1.0; } mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; // We calculate the ratio of the rotations residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm; // We calculate the ratio of the LM residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; // We calculate the absolute norms const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; const TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num; const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance; } else { Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance; } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << 
"\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio; r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true; const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance); if (disp_converged && rot_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) Table << BOLDFONT(FGRN(" Achieved")); else Table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. 
(unused) */ void Initialize( ModelPart& rModelPart) override { BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof (before building the table header, so the columns match the values streamed later) mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flag mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPCs exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This method provides the default parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_lagrangemultiplier_residual_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9, "contact_residual_relative_tolerance" : 1.0e-4, "contact_residual_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_lagrangemultiplier_residual_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that
are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation residual mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // The contact residual mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM residual TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM residual TDataType mLMInitialResidualNorm; /// The reference norm of the LM residual TDataType mLMCurrentResidualNorm; /// The current norm of the LM residual std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags
DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
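The only difference between the no_omp_formatted variant above and the omp_formatted variant below is the OpenMP pragma on the DoF loop in PostCriteria: the serial accumulation of squared residuals becomes a parallel reduction, with the scratch variables (dof_id, residual_dof_value) marked firstprivate. A reduced sketch of that reduction pattern follows; the function name is illustrative, and the real loop additionally classifies each DoF by variable.

// Sketch of the OpenMP reduction used in the omp variant of PostCriteria.
// Each thread accumulates private copies of norm and dof_num, which OpenMP
// combines with '+' once the loop finishes. Compiles serially if OpenMP is off.
#include <cstddef>
#include <utility>
#include <vector>

std::pair<double, std::size_t> AccumulateResidual(const std::vector<double>& rb) {
    double norm = 0.0;
    std::size_t dof_num = 0;
    #pragma omp parallel for reduction(+ : norm, dof_num)
    for (int i = 0; i < static_cast<int>(rb.size()); ++i) {
        norm += rb[i] * rb[i]; // squared residual contribution of DoF i
        ++dof_num;             // feeds the per-DoF absolute criterion
    }
    return {norm, dof_num};
}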
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierResidualContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. * @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierResidualContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor (parameters) * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param RotRatioTolerance Relative tolerance for rotation residual error * @param RotAbsTolerance Absolute tolerance for rotation residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType 
RotRatioTolerance, const TDataType RotAbsTolerance, const TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // The displacement residual mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The rotation residual mRotRatioTolerance = RotRatioTolerance; mRotAbsTolerance = RotAbsTolerance; // The normal contact residual mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } //* Copy constructor. DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mRotRatioTolerance(rOther.mRotRatioTolerance) ,mRotAbsTolerance(rOther.mRotAbsTolerance) ,mRotInitialResidualNorm(rOther.mRotInitialResidualNorm) ,mRotCurrentResidualNorm(rOther.mRotCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm) ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm) { } /// Destructor. ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something // Initialize TDataType disp_residual_solution_norm = 0.0, rot_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0; IndexType disp_dof_num(0), rot_dof_num(0), lm_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // Auxiliar values std::size_t dof_id = 0; TDataType residual_dof_value = 0.0; // The number of active dofs const std::size_t number_active_dofs = rb.size(); // Auxiliar displacement DoF check const std::function<bool(const VariableData&)> check_without_rot = [](const VariableData& rCurrVar) -> bool {return true;}; const std::function<bool(const VariableData&)> check_with_rot = [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));}; const auto* p_check_disp = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot; // Loop over Dofs #pragma omp parallel for firstprivate(dof_id, residual_dof_value) reduction(+:disp_residual_solution_norm,rot_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,rot_dof_num,lm_dof_num) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; dof_id = it_dof->EquationId(); // Check dof id is solved if (dof_id < number_active_dofs) { if (mActiveDofs[dof_id] == 1) { residual_dof_value = rb[dof_id]; const auto& r_curr_var = it_dof->GetVariable(); if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) { lm_residual_solution_norm += std::pow(residual_dof_value, 2); ++lm_dof_num; } else if ((*p_check_disp)(r_curr_var)) { disp_residual_solution_norm += std::pow(residual_dof_value, 2); ++disp_dof_num; } else { // We will assume is rotation dof KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl; rot_residual_solution_norm += std::pow(residual_dof_value, 2); ++rot_dof_num; } } } } mDispCurrentResidualNorm = disp_residual_solution_norm; mRotCurrentResidualNorm = rot_residual_solution_norm; mLMCurrentResidualNorm = lm_residual_solution_norm; TDataType residual_disp_ratio = 1.0; TDataType residual_rot_ratio = 1.0; TDataType residual_lm_ratio = 1.0; // We initialize the solution if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) { mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm; mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm; residual_disp_ratio = 1.0; residual_lm_ratio = 1.0; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { mRotInitialResidualNorm = (rot_residual_solution_norm == 0.0) ? 
1.0 : rot_residual_solution_norm; residual_rot_ratio = 1.0; } mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true); } // We calculate the ratio of the displacements residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm; // We calculate the ratio of the rotations residual_rot_ratio = mRotCurrentResidualNorm/mRotInitialResidualNorm; // We calculate the ratio of the LM residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl; // We calculate the absolute norms const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num; const TDataType residual_rot_abs = mRotCurrentResidualNorm/rot_dof_num; const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num; // The process info of the model part ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_rot_ratio << mRotRatioTolerance << residual_rot_abs << mRotAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance; } else { Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance; } } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << residual_rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << residual_rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific; KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << 
"\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tROTATION: RATIO = " << residual_rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << residual_rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl; } KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl; } } } r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio; r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance; // We check if converged const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance); const bool rot_converged = (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? (residual_rot_ratio <= mRotRatioTolerance || residual_rot_abs <= mRotAbsTolerance) : true; const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance); if (disp_converged && rot_converged && lm_converged ) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) Table << BOLDFONT(FGRN(" Achieved")); else Table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * @brief This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. 
(unused) */ void Initialize( ModelPart& rModelPart) override { BaseType::mConvergenceCriteriaIsInitialized = true; // Check rotation dof first, since the table layout below depends on it mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart)); ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED)) { r_table.AddColumn("RT RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Initialize flag mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); // Filling mActiveDofs when MPCs exist ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet); } /** * @brief This method provides the default parameters to avoid conflicts between the different constructors * @return The default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "displacement_lagrangemultiplier_residual_contact_criteria", "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "rotation_residual_relative_tolerance" : 1.0e-4, "rotation_residual_absolute_tolerance" : 1.0e-9, "contact_residual_relative_tolerance" : 1.0e-4, "contact_residual_absolute_tolerance" : 1.0e-9 })"); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "displacement_lagrangemultiplier_residual_contact_criteria"; } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that
are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The rotation residual mRotRatioTolerance = ThisParameters["rotation_residual_relative_tolerance"].GetDouble(); mRotAbsTolerance = ThisParameters["rotation_residual_absolute_tolerance"].GetDouble(); // The contact residual mLMRatioTolerance = ThisParameters["contact_residual_relative_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ROTATION_DOF_IS_CONSIDERED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual TDataType mRotRatioTolerance; /// The ratio threshold for the norm of the rotation residual TDataType mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation residual TDataType mRotInitialResidualNorm; /// The reference norm of the rotation residual TDataType mRotCurrentResidualNorm; /// The current norm of the rotation residual TDataType mLMRatioTolerance; /// The ratio threshold for the norm of the LM residual TDataType mLMAbsTolerance; /// The absolute value threshold for the norm of the LM residual TDataType mLMInitialResidualNorm; /// The reference norm of the LM residual TDataType mLMCurrentResidualNorm; /// The current norm of the LM residual std::vector<int> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags
DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
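/* Editor's addition -- a minimal, self-contained C sketch (not part of the Kratos header above) of the per-block convergence test that PostCriteria() applies: a residual block passes when the ratio of its current to initial squared-residual norm drops below a relative tolerance, or when its per-dof norm drops below an absolute tolerance. All names below are illustrative. */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
    double initial_norm; /* squared residual norm at the first nonlinear iteration (set to 1.0 when it would be 0) */
    double ratio_tol;    /* relative tolerance, e.g. 1.0e-4 */
    double abs_tol;      /* absolute per-dof tolerance, e.g. 1.0e-9 */
} residual_block;

/* One block (displacement, rotation or Lagrange multiplier) converges when either test passes. */
static bool block_converged(const residual_block* b, double current_norm, size_t num_dofs)
{
    const double ratio = current_norm / b->initial_norm;
    const double abs_per_dof = current_norm / (double)num_dofs;
    return (ratio <= b->ratio_tol) || (abs_per_dof <= b->abs_tol);
}

/* Overall convergence, as in PostCriteria(): every active block must pass. */
static bool all_converged(const residual_block* blocks, const double* norms, const size_t* dofs, size_t n)
{
    for (size_t k = 0; k < n; ++k)
        if (!block_converged(&blocks[k], norms[k], dofs[k]))
            return false;
    return true;
}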
mcxyzn.c
/******************************************** * mcxyzn, in ANSI Standard C programming language * Usage: mcxyzn myname * which loads myname_H.mci and myname_T.bin, and saves myname_F.bin. * * Initial version is an extension of mcxyz.c and the methodology is described in: * A.P. Tran and S.L. Jacques, 2020. * Modeling voxel-based Monte Carlo light transport with curved and oblique boundary surfaces. * Journal of Biomedical Optics, 25(2), p.025001. * * USAGE mcxyzn myname * where myname is the user's choice. * The program reads two files prepared by user: * myname_H.mci = header input file for mcxyz * myname_T.bin = tissue structure file * The output will be written to 2 files: * myname_props.m = optical properties (mua, mus, g for each tissue type) * myname_F.bin = fluence rate output F[i] [W/cm^2 per W delivered] * * The MATLAB program maketissue.m can create the two input files (myname_H.mci, myname_T.bin). * * The MATLAB program lookmcxyzn.m can read the output files and display * 1. Fluence rate F [W/cm^2 per W delivered] * 2. Deposition rate A [W/cm^3 per W delivered]. * * Log: * Original mcxyz.c was created by Steven L. Jacques and Ting Li (Oregon Health & Science University), 2010/2012. * Written by Ting based on Steve's mcsub.c., 2010. * Use Ting's FindVoxelFace(). * Use Steve's FindVoxelFace(), Dec. 30, 2010. * Reorganized by Steve. May 8, 2012: * Reads input files, outputs binary files. **********/ #include "mcxyzn.h" int main(int argc, const char * argv[]) { if (argc < 2) { /* no run name supplied on the command line */ printf("assuming you've compiled mcxyzn.c as gomcxyz ...\n"); printf("USAGE: gomcxyz name\n"); printf("which will load the files name_H.mci and name_T.bin\n"); printf("and run the Monte Carlo program.\n"); printf("Yields name_F.bin, which holds the fluence rate distribution.\n"); return 0; } mcconfig cfg; mcxyzn_init(&cfg,argc,argv); mcxyzn_launchsimulation(&cfg); return 0; } /* end of main */ void mcxyzn_init(mcconfig *cfg,int argc, const char * argv[]) { long int i; /* Input/Output */ strcpy(cfg->myname, argv[1]); // acquire name from argument of function call by user.
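/* Editor's note (added): the _H.mci header is parsed positionally -- each fgets() below consumes one line into a 32-byte buffer (so header lines must stay under 31 characters) and sscanf() extracts a single value; the line order in the file must match this read sequence exactly. */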
printf("name = %s\n",cfg->myname); /**** INPUT FILES *****/ /* IMPORT myname_H.mci */ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_H.mci"); cfg->fid = fopen(cfg->filename,"r"); fgets(cfg->buf, 32, cfg->fid); // run parameters sscanf(cfg->buf, "%f", &cfg->time_min); // desired time duration of run [min] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nx); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Ny); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nz); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dx); // size of bins [cm] fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dy); // size of bins [cm] fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dz); // size of bins [cm] // launch parameters fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->mcflag); // mcflag, 0 = uniform, 1 = Gaussian, 2 = iso-pt fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->launchflag); // launchflag, 0 = ignore, 1 = manually set fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->boundaryflag); // 0 = no boundaries, 1 = escape at all boundaries, 2 = escape at surface only fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->gradientflag); fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->xs); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->ys); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->zs); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->xfocus); // xfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->yfocus); // yfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->zfocus); // zfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->ux0); // ux trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->uy0); // uy trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->uz0); // uz trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->radius); // radius fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->waist); // waist // tissue optical properties fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nt); // # of tissue types in tissue list for (i=1; i<=cfg->Nt; i++) { fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->muav[i]); // absorption coeff [cm^-1] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->musv[i]); // scattering coeff [cm^-1] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->gv[i]); // anisotropy of scatter [dimensionless] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->nv[i]); } fclose(cfg->fid); printf("time_min = %0.2f min\n",cfg->time_min); printf("Nx = %d, dx = %0.4f [cm]\n",cfg->Nx,cfg->dx); printf("Ny = %d, dy = %0.4f [cm]\n",cfg->Ny,cfg->dy); printf("Nz = %d, dz = %0.4f [cm]\n",cfg->Nz,cfg->dz); printf("xs = %0.4f [cm]\n",cfg->xs); printf("ys = %0.4f [cm]\n",cfg->ys); printf("zs = %0.4f [cm]\n",cfg->zs); printf("mcflag = %d\n",cfg->mcflag); if (cfg->mcflag==0) printf("launching uniform flat-field beam\n"); if (cfg->mcflag==1) printf("launching Gaussian beam\n"); if (cfg->mcflag==2) printf("launching isotropic point source\n"); if (cfg->mcflag==3) printf("launching square source\n"); printf("xfocus = %0.4f [cm]\n",cfg->xfocus); printf("yfocus = %0.4f [cm]\n",cfg->yfocus); printf("zfocus = %0.2e [cm]\n",cfg->zfocus); if (cfg->launchflag==1) { printf("Launchflag ON, so launch the following:\n"); 
printf("ux0 = %0.4f [cm]\n",cfg->ux0); printf("uy0 = %0.4f [cm]\n",cfg->uy0); printf("uz0 = %0.4f [cm]\n",cfg->uz0); } else { printf("Launchflag OFF, so program calculates launch angles.\n"); printf("radius = %0.4f [cm]\n",cfg->radius); printf("waist = %0.4f [cm]\n",cfg->waist); } if (cfg->boundaryflag==0) printf("boundaryflag = 0, so no boundaries.\n"); else if (cfg->boundaryflag==1) printf("boundaryflag = 1, so escape at all boundaries.\n"); else if (cfg->boundaryflag==2) printf("boundaryflag = 2, so escape at surface only.\n"); else{ printf("improper boundaryflag. quit.\n"); //return 0; } printf("# of tissues available, Nt = %d\n",cfg->Nt); for (i=1; i<=cfg->Nt; i++) { printf("muav[%ld] = %0.4f [cm^-1]\n",i,cfg->muav[i]); printf("musv[%ld] = %0.4f [cm^-1]\n",i,cfg->musv[i]); printf(" gv[%ld] = %0.4f [--]\n",i,cfg->gv[i]); printf(" nv[%ld] = %0.4f [--]\n\n",i,cfg->nv[i]); } // SAVE optical properties, for later use by MATLAB. strcpy(cfg->filename,cfg->myname); strcat(cfg->filename,"_props.m"); cfg->fid = fopen(cfg->filename,"w"); for (i=1; i<=cfg->Nt; i++) { fprintf(cfg->fid,"muav(%ld) = %0.4f;\n",i,cfg->muav[i]); fprintf(cfg->fid,"musv(%ld) = %0.4f;\n",i,cfg->musv[i]); fprintf(cfg->fid,"gv(%ld) = %0.4f;\n",i,cfg->gv[i]); fprintf(cfg->fid,"nv(%ld) = %0.4f;\n\n",i,cfg->nv[i]); } fclose(cfg->fid); /* IMPORT BINARY TISSUE FILE */ cfg->NN = cfg->Nx*cfg->Ny*cfg->Nz; return; } void mcxyzn_launchsimulation(mcconfig *cfg) { unsigned char *v; float *F; float4 *g; //float *R_host; /* Initializing storing elements */ v = (unsigned char *)malloc(cfg->NN*sizeof(unsigned char)); /* tissue structure */ F = (float *)malloc(cfg->NN*sizeof(float)); /* relative fluence rate [W/cm^2/W.delivered] */ g = (float4 *)malloc(cfg->NN*sizeof(float4)); //R = (float *)malloc(cfg->Nx*cfg->Ny*sizeof(cl_mem)); /* escaping flux [W/cm^2/W.delivered] */ //for (i=0; i<Ny*Nx; i++) R[i] = 0; //prop_host = (float4 *)malloc(cfg->NN*sizeof(float4)); //cl_float *R = (cl_float *)(R_host); /* read binary file */ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_T.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(v, sizeof(unsigned char), cfg->NN, cfg->fid); fclose(cfg->fid); if (cfg->gradientflag > 0){ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gx.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].x = F[j]; // ensure F[] starts empty. } strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gy.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].y = F[j]; // ensure F[] starts empty. } strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gz.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].z = F[j]; // ensure F[] starts empty. } } for(int j=0; j<cfg->NN;j++) { F[j] = 0.f; // ensure F[] starts empty. 
} /* Show tissue on screen, along central z-axis, by listing tissue type #'s.*/ printf("central axial profile of tissue types:\n"); for (int iz=0; iz<cfg->Nz; iz++) { int i = (long)(iz*cfg->Ny*cfg->Nx + (cfg->Ny/2)*cfg->Nx + cfg->Nx/2); printf("%d",v[i]); } printf("\n\n"); /************************************* * == Setting up OpenMP structure == * *************************************/ /* Number of photons for the timed test run */ cfg->Nphotons = 50000; int nb_threads = omp_get_max_threads(); /* Create seeds for threads */ int *seed; seed = (int *)malloc(nb_threads*sizeof(int)*2); for (int i=0;i<nb_threads;i++){ seed[i*2]= rand(); seed[i*2+1] = rand(); } printf("Creating random seed of length (2 seeds per thread): %i \n",(int)nb_threads*2); /******************************************************************* * ============================ MAJOR CYCLE ======================== *******************************************************************/ cfg->start_time = clock(); cfg->now = time(NULL); printf("\n%s\n", ctime(&cfg->now)); /* Launch main kernel */ printf("[====== Main kernel ======]\n"); printf("Launching %i photons with %i threads. \n",(int)cfg->Nphotons,(int)nb_threads); double start,start2; double end,end2; start = omp_get_wtime(); mcxyzn_kernel(cfg,v,F,seed,g,cfg->Nphotons); end = omp_get_wtime(); printf("Test kernel of %i photons took %f sec.\n",(int) cfg->Nphotons, end-start); cfg->Nphotons = ceil(cfg->Nphotons/(end-start)*cfg->time_min*60)- cfg->Nphotons; /* photons/sec measured on the test run, scaled to the requested time_min, minus the 50000 already launched */ printf("Launching remaining %i photons.\n", (int) cfg->Nphotons); start2 = omp_get_wtime(); mcxyzn_kernel(cfg,v,F,seed,g,cfg->Nphotons); end2 = omp_get_wtime(); printf("Main kernel took %f sec.\n",end2-start2); printf("Total running time of %f sec for %i photons. \n",end+end2-start-start2,(int)cfg->Nphotons+50000); /* printf("------------------------------------------------------\n"); */ /* printf("Elapsed Time for %i photons = %f sec\n",(int)cfg->Nphotons,(float)(end-start)/(1e9)); */ /* printf("%i photons per minute\n", (int) (cfg->Nphotons/(end-start)*(1e9)*60)); */ /* printf("------------------------------------------------------\n"); */ /************** * == Save == * **************/ // Normalize deposition (A) to yield fluence rate (F). float temp = cfg->dx*cfg->dy*cfg->dz*(cfg->Nphotons+50000); /* include the 50000 test-run photons, which also deposited into F */ for (int i=0; i<cfg->NN;i++){ F[i] = (F[i]/(temp*cfg->muav[v[i]])); } // Save the binary file strcpy(cfg->filename,cfg->myname); strcat(cfg->filename,"_F.bin"); printf("saving %s\n",cfg->filename); cfg->fid = fopen(cfg->filename, "wb"); /* 3D voxel output */ fwrite(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); /* save reflectance */ /*float temp = cfg->dx*cfg->dy*(cfg->Nphotons+50000); for (int i=0; i<cfg->Nx*cfg->Ny;i++){ R[i] = (F[i]/(temp)); } strcpy(filename,myname); strcat(filename,"_Ryx.bin"); printf("saving %s\n",filename); fid = fopen(filename, "wb"); /* 2D voxel output */ /*fwrite(R, sizeof(float), cfg->Ny*cfg->Nx, fid); fclose(fid); printf("%s is done.\n",myname);*/ printf("------------------------------------------------------\n"); cfg->now = time(NULL); printf("%s\n", ctime(&cfg->now)); free(F); free(v); free(g); //free(R_host); return; } /* If 1+cos(theta) <= ONE_MINUS_COSZERO, fabs(PI-theta) <= 1e-6 rad.
*/ /* SUBROUTINES */ static inline unsigned long rotl(const unsigned long x, int k) { return (x << k) | (x >> (64 - k)); } /********************* * RandomGen * *********************/ float RandomGen(unsigned long* s) { union { unsigned long i; unsigned int u[2]; float f[2]; } result; result.i = s[0] + s[1]; s[1] ^= s[0]; s[0] = rotl(s[0], 24) ^ s[1] ^ (s[1] << 16); // a, b s[1] = rotl(s[1], 37); // c result.u[0] = 0x3F800000U | (result.u[0] >> 9); return result.f[0] - 1.f; } /************* SET SOURCE*************** * Launch collimated beam at x,y center. ***************************************/ void LaunchPhoton(mcconfig* cfg, float4* pos, float4* u, float* rnd, unsigned long* seed) { float r, phi, temp; /****************************/ /* Initial position. */ /* trajectory */ if (cfg->launchflag == 1) { // manually set launch pos->x = cfg->xs; pos->y = cfg->ys; pos->z = cfg->zs; u->x = cfg->ux0; u->y = cfg->uy0; u->z = cfg->uz0; } else { // use mcflag if (cfg->mcflag == 0) { // uniform beam //set launch point and width of beam while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 r = cfg->radius * sqrt(*rnd); // radius of beam at launch point while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 phi = (*rnd) * 2.0 * PI; pos->x = cfg->xs + r * cos(phi); pos->y = cfg->ys + r * sin(phi); pos->z = cfg->zs; // set trajectory toward focus while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 r = cfg->waist * sqrt(*rnd); // radius of beam at focus while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 phi = (*rnd) * 2.0 * PI; float xfocus = cfg->xs + r * cos(phi); //SLJ add cfg->xs float yfocus = cfg->ys + r * sin(phi); //SLJ add cfg->ys temp = 1 / sqrt((pos->x - xfocus) * (pos->x - xfocus) + (pos->y - yfocus) * (pos->y - yfocus) + (pos->z - cfg->zfocus) * (pos->z - cfg->zfocus)); u->x = -(pos->x - xfocus) * temp; u->y = -(pos->y - yfocus) * temp; u->z = sqrt(1 - u->x * u->x - u->y * u->y); } else if (cfg->mcflag == 2) { // isotropic pt source float ctheta = 1.0 - 2.0 * RandomGen(seed); float stheta = sqrt(1.0 - ctheta * ctheta); float psi = 2.0 * PI * RandomGen(seed); float cpsi = cos(psi); float spsi; if (psi < PI) spsi = sqrt(1.0 - cpsi * cpsi); else spsi = -sqrt(1.0 - cpsi * cpsi); pos->x = cfg->xs; pos->y = cfg->ys; pos->z = cfg->zs; u->x = stheta * cpsi; u->y = stheta * spsi; u->z = ctheta; } else if (cfg->mcflag == 3) { // rectangular source collimated while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 pos->x = cfg->radius * ((*rnd) * 2 - 1); // use radius to specify x-halfwidth of rectangle while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 pos->y = cfg->radius * ((*rnd) * 2 - 1); // use radius to specify y-halfwidth of rectangle pos->z = cfg->zs; u->x = 0.0; u->y = 0.0; u->z = 1.0; // collimated beam } } // end use mcflag pos->x = cfg->Nx / 2 + pos->x / cfg->dx; pos->y = cfg->Ny / 2 + pos->y / cfg->dy; pos->z = pos->z / cfg->dz; /****************************/ } /*********************************************************** * Determine if the two position are located in the same voxel * Returns 1 if same voxel, 0 if not same voxel. 
****/ int SameVoxel(mcconfig* cfg, float x1, float y1, float z1, float x2, float y2, float z2) { float xmin = fmin((floor)(x1), (floor)(x2)); float ymin = fmin((floor)(y1), (floor)(y2)); float zmin = fmin((floor)(z1), (floor)(z2)); float xmax = xmin + 1; float ymax = ymin + 1; float zmax = zmin + 1; return ((x1 <= xmax && x2 <= xmax && y1 <= ymax && y2 <= ymax && z1 < zmax && z2 <= zmax)); } /******************** * my version of FindVoxelFace for no scattering. * s = ls + FindVoxelFace2(x,y,z, tempx, tempy, tempz, dx, dy, dz, ux, uy, uz); ****/ float FindVoxelFace2(mcconfig* cfg, float x1, float y1, float z1, float x2, float y2, float z2, float ux, float uy, float uz) { int ix1 = floor(x1); int iy1 = floor(y1); int iz1 = floor(z1); int ix2, iy2, iz2; if (ux >= 0) ix2 = ix1 + 1; else ix2 = ix1; if (uy >= 0) iy2 = iy1 + 1; else iy2 = iy1; if (uz >= 0) iz2 = iz1 + 1; else iz2 = iz1; float xs = fabs((ix2 - x1) / ux); float ys = fabs((iy2 - y1) / uy); float zs = fabs((iz2 - z1) / uz); float s = fmin(xs, fmin(ys, zs)); return (s * cfg->dx); } /*********************************************************** * FRESNEL REFLECTANCE * Computes reflectance as photon passes from medium 1 to * medium 2 with refractive indices n1,n2. Incident * angle a1 is specified by cosine value ca1 = cos(a1). * Program returns value of transmitted angle a1 as * value in *ca2_Ptr = cos(a2). ****/ float RFresnel(float4* u, float4* g, float4* gb, float n1, float n2, unsigned long* seed, int* TIR_events, float* status) { if (n1 == n2) { return 1.0; } else { if ((g->x * g->x + g->y * g->y + g->z * g->z) == 0) { *g = *gb; } float rand = RandomGen(seed); float cos_i = -(u->x) * g->x - (u->y) * g->y - (u->z) * g->z; if (cos_i > 0.99999) { float r = (n2 - n1) / (n2 + n1); r *= r; if (rand > r) { //u->x = -g->x, u->y = -g->y, u->z = -g->z; return 1.0; } else { u->x = -u->x, u->y = -u->y, u->z = -u->z; return 0.0; } } else if (cos_i < 1e-5) { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; return 0.0; } else { float sin_t2 = pow(n1 / n2, 2) * (1 - cos_i * cos_i); if (sin_t2 >= 1.0) { if (*TIR_events < MAX_TIR) { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; (*TIR_events)++; return 0.0; } else { *status = DEAD; u->w = 0.0; return 1.0; } } else { float cos_t = sqrt(1.0 - sin_t2); float temp1 = n1 * cos_i; float temp2 = n2 * cos_t; temp1 = (temp1 - temp2) / (temp1 + temp2); float r = 0.5 * temp1 * temp1; temp1 = n2 * cos_i; temp2 = n1 * cos_t; temp1 = (temp1 - temp2) / (temp1 + temp2); r += 0.5 * temp1 * temp1; if (rand > r) { temp1 = n1 / n2; temp2 = temp1 * cos_i - cos_t; u->x = temp1 * (u->x) + temp2 * g->x; u->y = temp1 * (u->y) + temp2 * g->y; u->z = temp1 * (u->z) + temp2 * g->z; return 1.0; } else { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; return 0.0; } } } } } /******** END SUBROUTINE **********/ int getindex(mcconfig* cfg, int x, int y, int z) { return z * cfg->Ny * cfg->Nx + x * cfg->Ny + y; } void InterpGradient(mcconfig* cfg, float4* g, unsigned char* v, float4* pos, float4* n, unsigned char tissue) { if (pos->x >= cfg->Nx - 0.5) { pos->x = cfg->Nx - 0.51; } if (pos->y >= cfg->Ny - 0.5) { pos->y = cfg->Ny - 0.51; } if (pos->z >= cfg->Nz - 0.5) { pos->z = cfg->Nz - 0.51; } if (pos->x < 0.5) { pos->x = 0.51; } if (pos->y < 0.5) { pos->y = 0.51; } if (pos->z < 0.5) { pos->z = 0.51; } float x = round(pos->x); float y = round(pos->y); float z = round(pos->z); float xd = pos->x 
- x + 0.5; float yd = pos->y - y + 0.5; float zd = pos->z - z + 0.5; float v000, v001, v010, v011, v100, v101, v110, v111; v000 = (v[getindex(cfg, x - 1, y - 1, z - 1)] == tissue); v001 = (v[getindex(cfg, x - 1, y - 1, z)] == tissue); v010 = (v[getindex(cfg, x - 1, y, z - 1)] == tissue); v011 = (v[getindex(cfg, x - 1, y, z)] == tissue); v100 = (v[getindex(cfg, x, y - 1, z - 1)] == tissue); v101 = (v[getindex(cfg, x, y - 1, z)] == tissue); v110 = (v[getindex(cfg, x, y, z - 1)] == tissue); v111 = (v[getindex(cfg, x, y, z)] == tissue); float c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].x * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].x * v100; float c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].x * v001 + xd * g[getindex(cfg, x, y - 1, z)].x * v101; float c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].x * v010 + xd * g[getindex(cfg, x, y, z - 1)].x * v110; float c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].x * v011 + xd * g[getindex(cfg, x, y, z)].x * v111; float c0 = (1 - yd) * c00 + yd * c10; float c1 = (1 - yd) * c01 + yd * c11; n->x = c0 * (1 - zd) + c1 * zd; c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].y * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].y * v100; c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].y * v001 + xd * g[getindex(cfg, x, y - 1, z)].y * v101; c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].y * v010 + xd * g[getindex(cfg, x, y, z - 1)].y * v110; c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].y * v011 + xd * g[getindex(cfg, x, y, z)].y * v111; c0 = (1 - yd) * c00 + yd * c10; c1 = (1 - yd) * c01 + yd * c11; n->y = c0 * (1 - zd) + c1 * zd; c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].z * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].z * v100; c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].z * v001 + xd * g[getindex(cfg, x, y - 1, z)].z * v101; c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].z * v010 + xd * g[getindex(cfg, x, y, z - 1)].z * v110; c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].z * v011 + xd * g[getindex(cfg, x, y, z)].z * v111; c0 = (1 - yd) * c00 + yd * c10; c1 = (1 - yd) * c01 + yd * c11; n->z = c0 * (1 - zd) + c1 * zd; float magn = sqrt(n->x * n->x + n->y * n->y + n->z * n->z); n->x = n->x / magn; n->y = n->y / magn; n->z = n->z / magn; return; } void mcxyzn_kernel(mcconfig* cfg, unsigned char* v, float* F, int* dseed, float4* g, const int Nphotons) { #pragma omp parallel { int idx = omp_get_thread_num(); unsigned long seed[2]; seed[0] = dseed[idx * 2]; seed[1] = dseed[idx * 2 + 1]; #pragma omp for for (int k = 0; k < Nphotons; k++) { //if (idx == 0) //printf("Thread %i photon %i/%i Rand %f %i %i\n",idx,k+1,Nphotons,RandomGen(seed),seed[0],seed[1]); /**** LAUNCH Initialize photon position and trajectory. *****/ //if (fmod(i_photon,10)==0) printf("photon %ld took %d steps\n",i_photon,CNT); float rnd; /* assigned random value 0-1 */ /* dummy values */ float temp; /* dummy variable */ int ix, iy, iz; /* Added. Used to track photons */ float4 pos; /* photon position .w = weight */ float4 u; /* photon trajectory .w = sleft */ unsigned char type; /* absorption coef [cm^-1] scattering coef [cm^-1] anisotropy [-] refractivity index [-] */ pos.w = 1.0; /* set photon weight to one */ float status = ALIVE; /* Launch an ALIVE photon */ int TIR_events = 0; LaunchPhoton(cfg, &pos, &u, &rnd, seed); /* Get tissue voxel properties of launchpoint. * If photon beyond outer edge of defined voxels, * the tissue equals properties of outermost voxels. * Therefore, set outermost voxels to infinite background value. 
*/ ix = (int)(pos.x); iy = (int)(pos.y); iz = (int)(pos.z); if (ix >= cfg->Nx) ix = cfg->Nx - 1; if (iy >= cfg->Ny) iy = cfg->Ny - 1; if (iz >= cfg->Nz) iz = cfg->Nz - 1; if (ix < 0) ix = 0; if (iy < 0) iy = 0; if (iz < 0) iz = 0; /* Get the tissue type of located voxel */ int i = (int)(iz * cfg->Ny * cfg->Nx + ix * cfg->Ny + iy);//(iz*cfg->Ny*cfg->Nx + ix*cfg->Ny + iy); type = v[i]; int bflag = 1; /* HOP_DROP_SPIN_CHECK Propagate one photon until it dies as determined by ROULETTE. *******/ do { /**** HOP Take step to new position s = dimensionless stepsize x, uy, uz are cosines of current photon trajectory *****/ while ((rnd = RandomGen(seed)) <= ls); /* yields 0 < rnd <= 1 */ u.w = -log(rnd); /* dimensionless step */ do { // while sleft>0 float s = u.w / cfg->musv[type]; /* Step size [cm].*/ float tempx = pos.x + s * u.x / cfg->dx; /* Update positions. [cm] */ float tempy = pos.y + s * u.y / cfg->dx; float tempz = pos.z + s * u.z / cfg->dx; if (SameVoxel(cfg, pos.x, pos.y, pos.z, tempx, tempy, tempz)) /* photon in same voxel */ { pos.x = tempx; /* Update positions. */ pos.y = tempy; pos.z = tempz; /**** DROP Drop photon weight (W) into local bin. *****/ float absorb = pos.w * (1 - exp(-cfg->muav[type] * s)); /* photon weight absorbed at this step */ if (absorb != absorb) { status = DEAD; u.w = 0; } else { //atomicadd(&(F[i]),absorb); if (bflag) { #pragma omp atomic F[i] += absorb; } pos.w -= absorb; /* decrement WEIGHT by amount absorbed */ } // If photon within volume of heterogeneity, deposit energy in F[]. // Normalize F[] later, when save output. /* Update sleft */ u.w = 0; /* dimensionless step remaining */ } else /* photon has crossed voxel boundary */ { /* step to voxel face + "littlest step" so just inside new voxel. */ s = ls + FindVoxelFace2(cfg, pos.x, pos.y, pos.z, tempx, tempy, tempz, u.x, u.y, u.z); float temp_px, temp_py, temp_pz; /* Update positions. */ temp_px = pos.x + s * u.x / cfg->dx; temp_py = pos.y + s * u.y / cfg->dx; temp_pz = pos.z + s * u.z / cfg->dx; /**** DROP Drop photon weight (W) into local bin. *****/ float absorb = pos.w * (1 - exp(-cfg->muav[type] * s)); /* photon weight absorbed at this step */ if (absorb != absorb) { status = DEAD; u.w = 0; } else { //atomicadd(&(F[i]),absorb); if (bflag) { #pragma omp atomic F[i] += absorb; } pos.w -= absorb; /* decrement WEIGHT by amount absorbed */ } /* Update sleft */ u.w -= s * cfg->musv[type]; /* dimensionless step remaining */ if (u.w <= ls) u.w = 0; int temp_ix = (int)floor(temp_px); int temp_iy = (int)floor(temp_py); int temp_iz = (int)floor(temp_pz); bflag = 1; //boundary flag. 
Initalize as 1 = inside volume, then check; if (cfg->boundaryflag == 0) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; bflag = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; bflag = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; bflag = 0; } if (temp_iz < 0) { temp_iz = 0; bflag = 0; } if (temp_ix < 0) { temp_ix = 0; bflag = 0; } if (temp_iy < 0) { temp_iy = 0; bflag = 0; } } else if (cfg->boundaryflag == 1) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; status = DEAD; u.w = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; status = DEAD; u.w = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; status = DEAD; u.w = 0; } if (temp_iz < 0) { temp_iz = 0; status = DEAD; u.w = 0; } if (temp_ix < 0) { temp_ix = 0; status = DEAD; u.w = 0; } if (temp_iy < 0) { temp_iy = 0; status = DEAD; u.w = 0; } } else if (cfg->boundaryflag == 2) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; bflag = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; bflag = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; bflag = 0; } if (temp_iz < 0) { temp_iz = 0; status = DEAD; u.w = 0; } if (temp_ix < 0) { temp_ix = 0; bflag = 0; } if (temp_iy < 0) { temp_iy = 0; bflag = 0; } } int p = (int)(temp_iz * cfg->Ny * cfg->Nx + temp_ix * cfg->Ny + temp_iy); int fstatus = 1; float4 ga = g[i]; float4 gb = { ix - temp_ix,iy - temp_iy,iz - temp_iz,0.0 }; if (cfg->gradientflag == 2) { InterpGradient(cfg, g, v, &pos, &ga, v[i]); fstatus = RFresnel(&u, &ga, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } else if (cfg->gradientflag == 1) { fstatus = RFresnel(&u, &ga, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } else if (cfg->gradientflag == 0) { fstatus = RFresnel(&u, &gb, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } if (fstatus == 0) { pos.x = temp_px + (pos.x - temp_px) * ls * 2; pos.y = temp_py + (pos.y - temp_py) * ls * 2; pos.z = temp_pz + (pos.z - temp_pz) * ls * 2; } else { ix = temp_ix; iy = temp_iy; iz = temp_iz; // update pointer to tissue type type = v[p]; pos.x = temp_px; pos.y = temp_py; pos.z = temp_pz; i = p; } } //(sv) /* same voxel */ } while (u.w > 0.f); /**** SPIN Scatter photon into new trajectory defined by theta and psi. Theta is specified by cos(theta), which is determined based on the Henyey-Greenstein scattering function. Convert theta and psi into cosines ux, uy, uz. *****/ /* Sample for costheta */ rnd = RandomGen(seed); float ctheta, stheta, psi, cpsi, spsi; if (cfg->gv[type] == 0.0) ctheta = 2.0 * rnd - 1.0; else { temp = (1.0 - cfg->gv[type] * cfg->gv[type]) / (1.0 - cfg->gv[type] + 2 * cfg->gv[type] * rnd); ctheta = (1.0 + cfg->gv[type] * cfg->gv[type] - temp * temp) / (2.0 * cfg->gv[type]); } stheta = sqrt(1.0 - ctheta * ctheta); /* sqrtf() is faster than sin(). */ /* Sample psi. */ psi = 2.0 * PI * RandomGen(seed); cpsi = cos(psi); if (psi < PI) spsi = sqrt(1.0 - cpsi * cpsi); /* sqrtf() is faster than sin(). */ else spsi = -sqrt(1.0 - cpsi * cpsi); /* New trajectory. */ if (1 - fabs(u.z) <= ls) { /* close to perpendicular. */ u.x = stheta * cpsi; u.y = stheta * spsi; u.z = ctheta * SIGN(u.z); /* SIGN() is faster than division. 
*/ } else { /* usually use this option */ temp = sqrt(1.0 - u.z * u.z); float ux, uy, uz; ux = stheta * (u.x * u.z * cpsi - u.y * spsi) / temp + u.x * ctheta; uy = stheta * (u.y * u.z * cpsi + u.x * spsi) / temp + u.y * ctheta; uz = -stheta * cpsi * temp + u.z * ctheta; u.x = ux; u.y = uy; u.z = uz; } /**** CHECK ROULETTE If photon weight below THRESHOLD, then terminate photon using Roulette technique. Photon has CHANCE probability of having its weight increased by factor of 1/CHANCE, and 1-CHANCE probability of terminating. *****/ if (pos.w < THRESHOLD) { if (RandomGen(seed) * CHANCE <= 1.f) pos.w *= CHANCE; else status = DEAD; } } while (status == ALIVE); /* end STEP_CHECK_HOP_SPIN */ /* if ALIVE, continue propagating */ /* If photon DEAD, then launch new photon. */ } //#pragma omp for } //#pragma omp parallel return; }
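/* Editor's addition -- a stand-alone sketch of the Henyey-Greenstein sampling performed in the SPIN step above. For anisotropy g != 0, cos(theta) is drawn via the standard inverse-CDF formula of the HG phase function; for g == 0 scattering is isotropic. The caller supplies a uniform draw in [0,1), e.g. from RandomGen() above. */
#include <math.h>

static double sample_hg_costheta(double g, double rnd /* uniform in [0,1) */)
{
    if (g == 0.0)
        return 2.0 * rnd - 1.0; /* isotropic scattering */
    const double t = (1.0 - g * g) / (1.0 - g + 2.0 * g * rnd);
    return (1.0 + g * g - t * t) / (2.0 * g); /* HG inverse CDF */
}
/* The azimuth psi is then sampled uniformly in [0, 2*PI), and the new direction is obtained by rotating (sin(theta)cos(psi), sin(theta)sin(psi), cos(theta)) into the frame of the old trajectory, exactly as the SPIN block does. */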
/******************************************** * mcxyzn, in ANSI Standard C programming language * Usage: mcxyzn myname * which loads myname_H.mci and myname_T.bin, and saves myname_F.bin. * * Initial version is an extension of mcxyz.c and the methodology is described in: * A.P. Tran and S.L. Jacques, 2020. * Modeling voxel-based Monte Carlo light transport with curved and oblique boundary surfaces. * Journal of Biomedical Optics, 25(2), p.025001. * * USAGE mcxyzn myname * where myname is the user's choice. * The program reads two files prepared by user: * myname_H.mci = header input file for mcxyz * myname_T.bin = tissue structure file * The output will be written to 2 files: * myname_props.m = optical properties (mua, mus, g for each tissue type) * myname_F.bin = fluence rate output F[i] [W/cm^2 per W delivered] * * The MATLAB program maketissue.m can create the two input files (myname_H.mci, myname_T.bin). * * The MATLAB program lookmcxyzn.m can read the output files and display * 1. Fluence rate F [W/cm^2 per W delivered] * 2. Deposition rate A [W/cm^3 per W delivered]. * * Log: * Original mcxyz.c was created by Steven L. Jacques and Ting Li (Oregon Health & Science University), 2010/2012. * Written by Ting based on Steve's mcsub.c., 2010. * Use Ting's FindVoxelFace(). * Use Steve's FindVoxelFace(), Dec. 30, 2010. * Reorganized by Steve. May 8, 2012: * Reads input files, outputs binary files. **********/ #include "mcxyzn.h" int main(int argc, const char * argv[]) { if (argc < 2) { /* no run name supplied on the command line */ printf("assuming you've compiled mcxyzn.c as gomcxyz ...\n"); printf("USAGE: gomcxyz name\n"); printf("which will load the files name_H.mci and name_T.bin\n"); printf("and run the Monte Carlo program.\n"); printf("Yields name_F.bin, which holds the fluence rate distribution.\n"); return 0; } mcconfig cfg; mcxyzn_init(&cfg,argc,argv); mcxyzn_launchsimulation(&cfg); return 0; } /* end of main */ void mcxyzn_init(mcconfig *cfg,int argc, const char * argv[]) { long int i; /* Input/Output */ strcpy(cfg->myname, argv[1]); // acquire name from argument of function call by user.
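/* Editor's note (added): mcxyzn_kernel below divides by cfg->dx when advancing all three coordinates (and FindVoxelFace2 rescales by cfg->dx only), so the transport effectively assumes cubic voxels, i.e. dx == dy == dz. */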
printf("name = %s\n",cfg->myname); /**** INPUT FILES *****/ /* IMPORT myname_H.mci */ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_H.mci"); cfg->fid = fopen(cfg->filename,"r"); fgets(cfg->buf, 32, cfg->fid); // run parameters sscanf(cfg->buf, "%f", &cfg->time_min); // desired time duration of run [min] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nx); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Ny); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nz); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dx); // size of bins [cm] fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dy); // size of bins [cm] fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dz); // size of bins [cm] // launch parameters fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->mcflag); // mcflag, 0 = uniform, 1 = Gaussian, 2 = iso-pt fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->launchflag); // launchflag, 0 = ignore, 1 = manually set fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->boundaryflag); // 0 = no boundaries, 1 = escape at all boundaries, 2 = escape at surface only fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->gradientflag); fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->xs); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->ys); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->zs); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->xfocus); // xfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->yfocus); // yfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->zfocus); // zfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->ux0); // ux trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->uy0); // uy trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->uz0); // uz trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->radius); // radius fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->waist); // waist // tissue optical properties fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nt); // # of tissue types in tissue list for (i=1; i<=cfg->Nt; i++) { fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->muav[i]); // absorption coeff [cm^-1] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->musv[i]); // scattering coeff [cm^-1] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->gv[i]); // anisotropy of scatter [dimensionless] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->nv[i]); } fclose(cfg->fid); printf("time_min = %0.2f min\n",cfg->time_min); printf("Nx = %d, dx = %0.4f [cm]\n",cfg->Nx,cfg->dx); printf("Ny = %d, dy = %0.4f [cm]\n",cfg->Ny,cfg->dy); printf("Nz = %d, dz = %0.4f [cm]\n",cfg->Nz,cfg->dz); printf("xs = %0.4f [cm]\n",cfg->xs); printf("ys = %0.4f [cm]\n",cfg->ys); printf("zs = %0.4f [cm]\n",cfg->zs); printf("mcflag = %d\n",cfg->mcflag); if (cfg->mcflag==0) printf("launching uniform flat-field beam\n"); if (cfg->mcflag==1) printf("launching Gaussian beam\n"); if (cfg->mcflag==2) printf("launching isotropic point source\n"); if (cfg->mcflag==3) printf("launching square source\n"); printf("xfocus = %0.4f [cm]\n",cfg->xfocus); printf("yfocus = %0.4f [cm]\n",cfg->yfocus); printf("zfocus = %0.2e [cm]\n",cfg->zfocus); if (cfg->launchflag==1) { printf("Launchflag ON, so launch the following:\n"); 
printf("ux0 = %0.4f [cm]\n",cfg->ux0); printf("uy0 = %0.4f [cm]\n",cfg->uy0); printf("uz0 = %0.4f [cm]\n",cfg->uz0); } else { printf("Launchflag OFF, so program calculates launch angles.\n"); printf("radius = %0.4f [cm]\n",cfg->radius); printf("waist = %0.4f [cm]\n",cfg->waist); } if (cfg->boundaryflag==0) printf("boundaryflag = 0, so no boundaries.\n"); else if (cfg->boundaryflag==1) printf("boundaryflag = 1, so escape at all boundaries.\n"); else if (cfg->boundaryflag==2) printf("boundaryflag = 2, so escape at surface only.\n"); else{ printf("improper boundaryflag. quit.\n"); //return 0; } printf("# of tissues available, Nt = %d\n",cfg->Nt); for (i=1; i<=cfg->Nt; i++) { printf("muav[%ld] = %0.4f [cm^-1]\n",i,cfg->muav[i]); printf("musv[%ld] = %0.4f [cm^-1]\n",i,cfg->musv[i]); printf(" gv[%ld] = %0.4f [--]\n",i,cfg->gv[i]); printf(" nv[%ld] = %0.4f [--]\n\n",i,cfg->nv[i]); } // SAVE optical properties, for later use by MATLAB. strcpy(cfg->filename,cfg->myname); strcat(cfg->filename,"_props.m"); cfg->fid = fopen(cfg->filename,"w"); for (i=1; i<=cfg->Nt; i++) { fprintf(cfg->fid,"muav(%ld) = %0.4f;\n",i,cfg->muav[i]); fprintf(cfg->fid,"musv(%ld) = %0.4f;\n",i,cfg->musv[i]); fprintf(cfg->fid,"gv(%ld) = %0.4f;\n",i,cfg->gv[i]); fprintf(cfg->fid,"nv(%ld) = %0.4f;\n\n",i,cfg->nv[i]); } fclose(cfg->fid); /* IMPORT BINARY TISSUE FILE */ cfg->NN = cfg->Nx*cfg->Ny*cfg->Nz; return; } void mcxyzn_launchsimulation(mcconfig *cfg) { unsigned char *v; float *F; float4 *g; //float *R_host; /* Initializing storing elements */ v = (unsigned char *)malloc(cfg->NN*sizeof(unsigned char)); /* tissue structure */ F = (float *)malloc(cfg->NN*sizeof(float)); /* relative fluence rate [W/cm^2/W.delivered] */ g = (float4 *)malloc(cfg->NN*sizeof(float4)); //R = (float *)malloc(cfg->Nx*cfg->Ny*sizeof(cl_mem)); /* escaping flux [W/cm^2/W.delivered] */ //for (i=0; i<Ny*Nx; i++) R[i] = 0; //prop_host = (float4 *)malloc(cfg->NN*sizeof(float4)); //cl_float *R = (cl_float *)(R_host); /* read binary file */ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_T.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(v, sizeof(unsigned char), cfg->NN, cfg->fid); fclose(cfg->fid); if (cfg->gradientflag > 0){ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gx.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].x = F[j]; // ensure F[] starts empty. } strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gy.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].y = F[j]; // ensure F[] starts empty. } strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gz.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].z = F[j]; // ensure F[] starts empty. } } for(int j=0; j<cfg->NN;j++) { F[j] = 0.f; // ensure F[] starts empty. 
} /* Show tissue on screen, along central z-axis, by listing tissue type #'s.*/ printf("central axial profile of tissue types:\n"); for (int iz=0; iz<cfg->Nz; iz++) { int i = (long)(iz*cfg->Ny*cfg->Nx + (cfg->Ny/2)*cfg->Nx + cfg->Nx/2); printf("%d",v[i]); } printf("\n\n"); /************************************* * == Setting up OpenMP structure == * *************************************/ /* Number of photons for the timed test run */ cfg->Nphotons = 50000; int nb_threads = omp_get_max_threads(); /* Create seeds for threads */ int *seed; seed = (int *)malloc(nb_threads*sizeof(int)*2); for (int i=0;i<nb_threads;i++){ seed[i*2]= rand(); seed[i*2+1] = rand(); } printf("Creating random seed of length (2 seeds per thread): %i \n",(int)nb_threads*2); /******************************************************************* * ============================ MAJOR CYCLE ======================== *******************************************************************/ cfg->start_time = clock(); cfg->now = time(NULL); printf("\n%s\n", ctime(&cfg->now)); /* Launch main kernel */ printf("[====== Main kernel ======]\n"); printf("Launching %i photons with %i threads. \n",(int)cfg->Nphotons,(int)nb_threads); double start,start2; double end,end2; start = omp_get_wtime(); mcxyzn_kernel(cfg,v,F,seed,g,cfg->Nphotons); end = omp_get_wtime(); printf("Test kernel of %i photons took %f sec.\n",(int) cfg->Nphotons, end-start); cfg->Nphotons = ceil(cfg->Nphotons/(end-start)*cfg->time_min*60)- cfg->Nphotons; /* photons/sec measured on the test run, scaled to the requested time_min, minus the 50000 already launched */ printf("Launching remaining %i photons.\n", (int) cfg->Nphotons); start2 = omp_get_wtime(); mcxyzn_kernel(cfg,v,F,seed,g,cfg->Nphotons); end2 = omp_get_wtime(); printf("Main kernel took %f sec.\n",end2-start2); printf("Total running time of %f sec for %i photons. \n",end+end2-start-start2,(int)cfg->Nphotons+50000); /* printf("------------------------------------------------------\n"); */ /* printf("Elapsed Time for %i photons = %f sec\n",(int)cfg->Nphotons,(float)(end-start)/(1e9)); */ /* printf("%i photons per minute\n", (int) (cfg->Nphotons/(end-start)*(1e9)*60)); */ /* printf("------------------------------------------------------\n"); */ /************** * == Save == * **************/ // Normalize deposition (A) to yield fluence rate (F). float temp = cfg->dx*cfg->dy*cfg->dz*(cfg->Nphotons+50000); /* include the 50000 test-run photons, which also deposited into F */ for (int i=0; i<cfg->NN;i++){ F[i] = (F[i]/(temp*cfg->muav[v[i]])); } // Save the binary file strcpy(cfg->filename,cfg->myname); strcat(cfg->filename,"_F.bin"); printf("saving %s\n",cfg->filename); cfg->fid = fopen(cfg->filename, "wb"); /* 3D voxel output */ fwrite(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); /* save reflectance */ /*float temp = cfg->dx*cfg->dy*(cfg->Nphotons+50000); for (int i=0; i<cfg->Nx*cfg->Ny;i++){ R[i] = (F[i]/(temp)); } strcpy(filename,myname); strcat(filename,"_Ryx.bin"); printf("saving %s\n",filename); fid = fopen(filename, "wb"); /* 2D voxel output */ /*fwrite(R, sizeof(float), cfg->Ny*cfg->Nx, fid); fclose(fid); printf("%s is done.\n",myname);*/ printf("------------------------------------------------------\n"); cfg->now = time(NULL); printf("%s\n", ctime(&cfg->now)); free(F); free(v); free(g); //free(R_host); return; } /* If 1+cos(theta) <= ONE_MINUS_COSZERO, fabs(PI-theta) <= 1e-6 rad.
*/ /* SUBROUTINES */ static inline unsigned long rotl(const unsigned long x, int k) { return (x << k) | (x >> (64 - k)); } /********************* * RandomGen * *********************/ float RandomGen(unsigned long* s) { union { unsigned long i; unsigned int u[2]; float f[2]; } result; result.i = s[0] + s[1]; s[1] ^= s[0]; s[0] = rotl(s[0], 24) ^ s[1] ^ (s[1] << 16); // a, b s[1] = rotl(s[1], 37); // c result.u[0] = 0x3F800000U | (result.u[0] >> 9); return result.f[0] - 1.f; } /************* SET SOURCE*************** * Launch collimated beam at x,y center. ***************************************/ void LaunchPhoton(mcconfig* cfg, float4* pos, float4* u, float* rnd, unsigned long* seed) { float r, phi, temp; /****************************/ /* Initial position. */ /* trajectory */ if (cfg->launchflag == 1) { // manually set launch pos->x = cfg->xs; pos->y = cfg->ys; pos->z = cfg->zs; u->x = cfg->ux0; u->y = cfg->uy0; u->z = cfg->uz0; } else { // use mcflag if (cfg->mcflag == 0) { // uniform beam //set launch point and width of beam while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 r = cfg->radius * sqrt(*rnd); // radius of beam at launch point while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 phi = (*rnd) * 2.0 * PI; pos->x = cfg->xs + r * cos(phi); pos->y = cfg->ys + r * sin(phi); pos->z = cfg->zs; // set trajectory toward focus while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 r = cfg->waist * sqrt(*rnd); // radius of beam at focus while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 phi = (*rnd) * 2.0 * PI; float xfocus = cfg->xs + r * cos(phi); //SLJ add cfg->xs float yfocus = cfg->ys + r * sin(phi); //SLJ add cfg->ys temp = 1 / sqrt((pos->x - xfocus) * (pos->x - xfocus) + (pos->y - yfocus) * (pos->y - yfocus) + (pos->z - cfg->zfocus) * (pos->z - cfg->zfocus)); u->x = -(pos->x - xfocus) * temp; u->y = -(pos->y - yfocus) * temp; u->z = sqrt(1 - u->x * u->x - u->y * u->y); } else if (cfg->mcflag == 2) { // isotropic pt source float ctheta = 1.0 - 2.0 * RandomGen(seed); float stheta = sqrt(1.0 - ctheta * ctheta); float psi = 2.0 * PI * RandomGen(seed); float cpsi = cos(psi); float spsi; if (psi < PI) spsi = sqrt(1.0 - cpsi * cpsi); else spsi = -sqrt(1.0 - cpsi * cpsi); pos->x = cfg->xs; pos->y = cfg->ys; pos->z = cfg->zs; u->x = stheta * cpsi; u->y = stheta * spsi; u->z = ctheta; } else if (cfg->mcflag == 3) { // rectangular source collimated while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 pos->x = cfg->radius * ((*rnd) * 2 - 1); // use radius to specify x-halfwidth of rectangle while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 pos->y = cfg->radius * ((*rnd) * 2 - 1); // use radius to specify y-halfwidth of rectangle pos->z = cfg->zs; u->x = 0.0; u->y = 0.0; u->z = 1.0; // collimated beam } } // end use mcflag pos->x = cfg->Nx / 2 + pos->x / cfg->dx; pos->y = cfg->Ny / 2 + pos->y / cfg->dy; pos->z = pos->z / cfg->dz; /****************************/ } /*********************************************************** * Determine if the two position are located in the same voxel * Returns 1 if same voxel, 0 if not same voxel. 
****/ int SameVoxel(mcconfig* cfg, float x1, float y1, float z1, float x2, float y2, float z2) { float xmin = fmin((floor)(x1), (floor)(x2)); float ymin = fmin((floor)(y1), (floor)(y2)); float zmin = fmin((floor)(z1), (floor)(z2)); float xmax = xmin + 1; float ymax = ymin + 1; float zmax = zmin + 1; return ((x1 <= xmax && x2 <= xmax && y1 <= ymax && y2 <= ymax && z1 < zmax && z2 <= zmax)); } /******************** * my version of FindVoxelFace for no scattering. * s = ls + FindVoxelFace2(x,y,z, tempx, tempy, tempz, dx, dy, dz, ux, uy, uz); ****/ float FindVoxelFace2(mcconfig* cfg, float x1, float y1, float z1, float x2, float y2, float z2, float ux, float uy, float uz) { int ix1 = floor(x1); int iy1 = floor(y1); int iz1 = floor(z1); int ix2, iy2, iz2; if (ux >= 0) ix2 = ix1 + 1; else ix2 = ix1; if (uy >= 0) iy2 = iy1 + 1; else iy2 = iy1; if (uz >= 0) iz2 = iz1 + 1; else iz2 = iz1; float xs = fabs((ix2 - x1) / ux); float ys = fabs((iy2 - y1) / uy); float zs = fabs((iz2 - z1) / uz); float s = fmin(xs, fmin(ys, zs)); return (s * cfg->dx); } /*********************************************************** * FRESNEL REFLECTANCE * Computes reflectance as photon passes from medium 1 to * medium 2 with refractive indices n1,n2. Incident * angle a1 is specified by cosine value ca1 = cos(a1). * Program returns value of transmitted angle a1 as * value in *ca2_Ptr = cos(a2). ****/ float RFresnel(float4* u, float4* g, float4* gb, float n1, float n2, unsigned long* seed, int* TIR_events, float* status) { if (n1 == n2) { return 1.0; } else { if ((g->x * g->x + g->y * g->y + g->z * g->z) == 0) { *g = *gb; } float rand = RandomGen(seed); float cos_i = -(u->x) * g->x - (u->y) * g->y - (u->z) * g->z; if (cos_i > 0.99999) { float r = (n2 - n1) / (n2 + n1); r *= r; if (rand > r) { //u->x = -g->x, u->y = -g->y, u->z = -g->z; return 1.0; } else { u->x = -u->x, u->y = -u->y, u->z = -u->z; return 0.0; } } else if (cos_i < 1e-5) { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; return 0.0; } else { float sin_t2 = pow(n1 / n2, 2) * (1 - cos_i * cos_i); if (sin_t2 >= 1.0) { if (*TIR_events < MAX_TIR) { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; (*TIR_events)++; return 0.0; } else { *status = DEAD; u->w = 0.0; return 1.0; } } else { float cos_t = sqrt(1.0 - sin_t2); float temp1 = n1 * cos_i; float temp2 = n2 * cos_t; temp1 = (temp1 - temp2) / (temp1 + temp2); float r = 0.5 * temp1 * temp1; temp1 = n2 * cos_i; temp2 = n1 * cos_t; temp1 = (temp1 - temp2) / (temp1 + temp2); r += 0.5 * temp1 * temp1; if (rand > r) { temp1 = n1 / n2; temp2 = temp1 * cos_i - cos_t; u->x = temp1 * (u->x) + temp2 * g->x; u->y = temp1 * (u->y) + temp2 * g->y; u->z = temp1 * (u->z) + temp2 * g->z; return 1.0; } else { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; return 0.0; } } } } } /******** END SUBROUTINE **********/ int getindex(mcconfig* cfg, int x, int y, int z) { return z * cfg->Ny * cfg->Nx + x * cfg->Ny + y; } void InterpGradient(mcconfig* cfg, float4* g, unsigned char* v, float4* pos, float4* n, unsigned char tissue) { if (pos->x >= cfg->Nx - 0.5) { pos->x = cfg->Nx - 0.51; } if (pos->y >= cfg->Ny - 0.5) { pos->y = cfg->Ny - 0.51; } if (pos->z >= cfg->Nz - 0.5) { pos->z = cfg->Nz - 0.51; } if (pos->x < 0.5) { pos->x = 0.51; } if (pos->y < 0.5) { pos->y = 0.51; } if (pos->z < 0.5) { pos->z = 0.51; } float x = round(pos->x); float y = round(pos->y); float z = round(pos->z); float xd = pos->x 
- x + 0.5; float yd = pos->y - y + 0.5; float zd = pos->z - z + 0.5; float v000, v001, v010, v011, v100, v101, v110, v111; v000 = (v[getindex(cfg, x - 1, y - 1, z - 1)] == tissue); v001 = (v[getindex(cfg, x - 1, y - 1, z)] == tissue); v010 = (v[getindex(cfg, x - 1, y, z - 1)] == tissue); v011 = (v[getindex(cfg, x - 1, y, z)] == tissue); v100 = (v[getindex(cfg, x, y - 1, z - 1)] == tissue); v101 = (v[getindex(cfg, x, y - 1, z)] == tissue); v110 = (v[getindex(cfg, x, y, z - 1)] == tissue); v111 = (v[getindex(cfg, x, y, z)] == tissue); float c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].x * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].x * v100; float c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].x * v001 + xd * g[getindex(cfg, x, y - 1, z)].x * v101; float c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].x * v010 + xd * g[getindex(cfg, x, y, z - 1)].x * v110; float c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].x * v011 + xd * g[getindex(cfg, x, y, z)].x * v111; float c0 = (1 - yd) * c00 + yd * c10; float c1 = (1 - yd) * c01 + yd * c11; n->x = c0 * (1 - zd) + c1 * zd; c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].y * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].y * v100; c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].y * v001 + xd * g[getindex(cfg, x, y - 1, z)].y * v101; c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].y * v010 + xd * g[getindex(cfg, x, y, z - 1)].y * v110; c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].y * v011 + xd * g[getindex(cfg, x, y, z)].y * v111; c0 = (1 - yd) * c00 + yd * c10; c1 = (1 - yd) * c01 + yd * c11; n->y = c0 * (1 - zd) + c1 * zd; c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].z * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].z * v100; c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].z * v001 + xd * g[getindex(cfg, x, y - 1, z)].z * v101; c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].z * v010 + xd * g[getindex(cfg, x, y, z - 1)].z * v110; c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].z * v011 + xd * g[getindex(cfg, x, y, z)].z * v111; c0 = (1 - yd) * c00 + yd * c10; c1 = (1 - yd) * c01 + yd * c11; n->z = c0 * (1 - zd) + c1 * zd; float magn = sqrt(n->x * n->x + n->y * n->y + n->z * n->z); n->x = n->x / magn; n->y = n->y / magn; n->z = n->z / magn; return; } void mcxyzn_kernel(mcconfig* cfg, unsigned char* v, float* F, int* dseed, float4* g, const int Nphotons) { int idx = omp_get_thread_num(); unsigned long seed[2]; seed[0] = dseed[idx * 2]; seed[1] = dseed[idx * 2 + 1]; for (int k = 0; k < Nphotons; k++) { //if (idx == 0) //printf("Thread %i photon %i/%i Rand %f %i %i\n",idx,k+1,Nphotons,RandomGen(seed),seed[0],seed[1]); /**** LAUNCH Initialize photon position and trajectory. *****/ //if (fmod(i_photon,10)==0) printf("photon %ld took %d steps\n",i_photon,CNT); float rnd; /* assigned random value 0-1 */ /* dummy values */ float temp; /* dummy variable */ int ix, iy, iz; /* Added. Used to track photons */ float4 pos; /* photon position .w = weight */ float4 u; /* photon trajectory .w = sleft */ unsigned char type; /* absorption coef [cm^-1] scattering coef [cm^-1] anisotropy [-] refractivity index [-] */ pos.w = 1.0; /* set photon weight to one */ float status = ALIVE; /* Launch an ALIVE photon */ int TIR_events = 0; LaunchPhoton(cfg, &pos, &u, &rnd, seed); /* Get tissue voxel properties of launchpoint. * If photon beyond outer edge of defined voxels, * the tissue equals properties of outermost voxels. * Therefore, set outermost voxels to infinite background value. 
*/ ix = (int)(pos.x); iy = (int)(pos.y); iz = (int)(pos.z); if (ix >= cfg->Nx) ix = cfg->Nx - 1; if (iy >= cfg->Ny) iy = cfg->Ny - 1; if (iz >= cfg->Nz) iz = cfg->Nz - 1; if (ix < 0) ix = 0; if (iy < 0) iy = 0; if (iz < 0) iz = 0; /* Get the tissue type of located voxel */ int i = (int)(iz * cfg->Ny * cfg->Nx + ix * cfg->Ny + iy);//(iz*cfg->Ny*cfg->Nx + ix*cfg->Ny + iy); type = v[i]; int bflag = 1; /* HOP_DROP_SPIN_CHECK Propagate one photon until it dies as determined by ROULETTE. *******/ do { /**** HOP Take step to new position s = dimensionless stepsize x, uy, uz are cosines of current photon trajectory *****/ while ((rnd = RandomGen(seed)) <= ls); /* yields 0 < rnd <= 1 */ u.w = -log(rnd); /* dimensionless step */ do { // while sleft>0 float s = u.w / cfg->musv[type]; /* Step size [cm].*/ float tempx = pos.x + s * u.x / cfg->dx; /* Update positions. [cm] */ float tempy = pos.y + s * u.y / cfg->dx; float tempz = pos.z + s * u.z / cfg->dx; if (SameVoxel(cfg, pos.x, pos.y, pos.z, tempx, tempy, tempz)) /* photon in same voxel */ { pos.x = tempx; /* Update positions. */ pos.y = tempy; pos.z = tempz; /**** DROP Drop photon weight (W) into local bin. *****/ float absorb = pos.w * (1 - exp(-cfg->muav[type] * s)); /* photon weight absorbed at this step */ if (absorb != absorb) { status = DEAD; u.w = 0; } else { //atomicadd(&(F[i]),absorb); if (bflag) { F[i] += absorb; } pos.w -= absorb; /* decrement WEIGHT by amount absorbed */ } // If photon within volume of heterogeneity, deposit energy in F[]. // Normalize F[] later, when save output. /* Update sleft */ u.w = 0; /* dimensionless step remaining */ } else /* photon has crossed voxel boundary */ { /* step to voxel face + "littlest step" so just inside new voxel. */ s = ls + FindVoxelFace2(cfg, pos.x, pos.y, pos.z, tempx, tempy, tempz, u.x, u.y, u.z); float temp_px, temp_py, temp_pz; /* Update positions. */ temp_px = pos.x + s * u.x / cfg->dx; temp_py = pos.y + s * u.y / cfg->dx; temp_pz = pos.z + s * u.z / cfg->dx; /**** DROP Drop photon weight (W) into local bin. *****/ float absorb = pos.w * (1 - exp(-cfg->muav[type] * s)); /* photon weight absorbed at this step */ if (absorb != absorb) { status = DEAD; u.w = 0; } else { //atomicadd(&(F[i]),absorb); if (bflag) { F[i] += absorb; } pos.w -= absorb; /* decrement WEIGHT by amount absorbed */ } /* Update sleft */ u.w -= s * cfg->musv[type]; /* dimensionless step remaining */ if (u.w <= ls) u.w = 0; int temp_ix = (int)floor(temp_px); int temp_iy = (int)floor(temp_py); int temp_iz = (int)floor(temp_pz); bflag = 1; //boundary flag. 
Initialize as 1 = inside volume, then check. if (cfg->boundaryflag == 0) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; bflag = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; bflag = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; bflag = 0; } if (temp_iz < 0) { temp_iz = 0; bflag = 0; } if (temp_ix < 0) { temp_ix = 0; bflag = 0; } if (temp_iy < 0) { temp_iy = 0; bflag = 0; } } else if (cfg->boundaryflag == 1) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; status = DEAD; u.w = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; status = DEAD; u.w = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; status = DEAD; u.w = 0; } if (temp_iz < 0) { temp_iz = 0; status = DEAD; u.w = 0; } if (temp_ix < 0) { temp_ix = 0; status = DEAD; u.w = 0; } if (temp_iy < 0) { temp_iy = 0; status = DEAD; u.w = 0; } } else if (cfg->boundaryflag == 2) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; bflag = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; bflag = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; bflag = 0; } if (temp_iz < 0) { temp_iz = 0; status = DEAD; u.w = 0; } if (temp_ix < 0) { temp_ix = 0; bflag = 0; } if (temp_iy < 0) { temp_iy = 0; bflag = 0; } } int p = (int)(temp_iz * cfg->Ny * cfg->Nx + temp_ix * cfg->Ny + temp_iy); int fstatus = 1; float4 ga = g[i]; float4 gb = { ix - temp_ix,iy - temp_iy,iz - temp_iz,0.0 }; if (cfg->gradientflag == 2) { InterpGradient(cfg, g, v, &pos, &ga, v[i]); fstatus = RFresnel(&u, &ga, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } else if (cfg->gradientflag == 1) { fstatus = RFresnel(&u, &ga, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } else if (cfg->gradientflag == 0) { fstatus = RFresnel(&u, &gb, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } if (fstatus == 0) { pos.x = temp_px + (pos.x - temp_px) * ls * 2; pos.y = temp_py + (pos.y - temp_py) * ls * 2; pos.z = temp_pz + (pos.z - temp_pz) * ls * 2; } else { ix = temp_ix; iy = temp_iy; iz = temp_iz; // update pointer to tissue type type = v[p]; pos.x = temp_px; pos.y = temp_py; pos.z = temp_pz; i = p; } } //(sv) /* same voxel */ } while (u.w > 0.f); /**** SPIN Scatter photon into new trajectory defined by theta and psi. Theta is specified by cos(theta), which is determined based on the Henyey-Greenstein scattering function. Convert theta and psi into cosines ux, uy, uz. *****/ /* Sample for costheta */ rnd = RandomGen(seed); float ctheta, stheta, psi, cpsi, spsi; if (cfg->gv[type] == 0.0) ctheta = 2.0 * rnd - 1.0; else { temp = (1.0 - cfg->gv[type] * cfg->gv[type]) / (1.0 - cfg->gv[type] + 2 * cfg->gv[type] * rnd); ctheta = (1.0 + cfg->gv[type] * cfg->gv[type] - temp * temp) / (2.0 * cfg->gv[type]); } stheta = sqrt(1.0 - ctheta * ctheta); /* sqrt() is faster than sin(). */ /* Sample psi. */ psi = 2.0 * PI * RandomGen(seed); cpsi = cos(psi); if (psi < PI) spsi = sqrt(1.0 - cpsi * cpsi); /* sqrt() is faster than sin(). */ else spsi = -sqrt(1.0 - cpsi * cpsi); /* New trajectory. */ if (1 - fabs(u.z) <= ls) { /* close to perpendicular. */ u.x = stheta * cpsi; u.y = stheta * spsi; u.z = ctheta * SIGN(u.z); /* SIGN() is faster than division. 
*/ } else { /* usually use this option */ temp = sqrt(1.0 - u.z * u.z); float ux, uy, uz; ux = stheta * (u.x * u.z * cpsi - u.y * spsi) / temp + u.x * ctheta; uy = stheta * (u.y * u.z * cpsi + u.x * spsi) / temp + u.y * ctheta; uz = -stheta * cpsi * temp + u.z * ctheta; u.x = ux; u.y = uy; u.z = uz; } /**** CHECK ROULETTE If photon weight below THRESHOLD, then terminate photon using Roulette technique. Photon has CHANCE probability of having its weight increased by factor of 1/CHANCE, and 1-CHANCE probability of terminating. *****/ if (pos.w < THRESHOLD) { if (RandomGen(seed) * CHANCE <= 1.f) pos.w *= CHANCE; else status = DEAD; } } while (status == ALIVE); /* end STEP_CHECK_HOP_SPIN */ /* if ALIVE, continue propagating */ /* If photon DEAD, then launch new photon. */ } // // return; }
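/*********************************************************** * A minimal standalone sketch of the Henyey-Greenstein sampling step used * in the SPIN section above. rand()/RAND_MAX stands in for the kernel's * RandomGen(); the inverse-CDF formula for ctheta is the same one used in * mcxyzn_kernel. For the HG phase function the mean of cos(theta) equals * the anisotropy g, which the loop below checks empirically. This is an * illustrative sketch, not part of the program above; compile alone with * something like cc -lm. ***********************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static double sample_hg_costheta(double g)
{
    double rnd = (double)rand() / RAND_MAX;
    if (g == 0.0)
        return 2.0 * rnd - 1.0;                 /* isotropic limit */
    /* inverse CDF of the Henyey-Greenstein phase function */
    double temp = (1.0 - g * g) / (1.0 - g + 2.0 * g * rnd);
    return (1.0 + g * g - temp * temp) / (2.0 * g);
}

int main(void)
{
    double g = 0.90;                            /* typical tissue anisotropy */
    double sum = 0.0;
    int n = 1000000;
    for (int k = 0; k < n; k++)
        sum += sample_hg_costheta(g);
    /* E[cos(theta)] = g for HG, so this should print a value close to 0.9 */
    printf("mean cos(theta) = %f, expected %f\n", sum / n, g);
    return 0;
}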
/******************************************** * mcxyzn, in ANSI Standard C programming language * Usage: mcxyzn myname * which loads myname_H.mci and myname_T.bin, and saves myname_F.bin. * * Initial version is an extension of mcxyz.c and the methodology is described in: * A.P. Tran and S.L. Jacques, 2020. * Modeling voxel-based Monte Carlo light transport with curved and oblique boundary surfaces. * Journal of Biomedical Optics, 25(2), p.025001. * * USAGE mcxyzn myname * where myname is the user's choice. * The program reads two files prepared by user: * myname_H.mci = header input file for mcxyzn * myname_T.bin = tissue structure file * The output will be written to 2 files: * myname_props.m = optical properties (mua, mus, g, n for each tissue type) * myname_F.bin = fluence rate output F[i] [W/cm^2 per W delivered] * * The MATLAB program maketissue.m can create the two input files (myname_H.mci, myname_T.bin). * * The MATLAB program lookmcxyzn.m can read the output files and display * 1. Fluence rate F [W/cm^2 per W delivered] * 2. Deposition rate A [W/cm^3 per W delivered]. * * Log: * Original mcxyz.c was created by Steven L. Jacques and Ting Li (Oregon Health & Science University), 2010/2012. * Written by Ting based on Steve's mcsub.c, 2010. * Use Ting's FindVoxelFace(). * Use Steve's FindVoxelFace(), Dec. 30, 2010. * Reorganized by Steve. May 8, 2012: * Reads input files, outputs binary files. **********/ #include "mcxyzn.h" int main(int argc, const char * argv[]) { if (argc < 2) { printf("assuming you've compiled mcxyzn.c as mcxyzn ...\n"); printf("USAGE: mcxyzn name\n"); printf("which will load the files name_H.mci and name_T.bin\n"); printf("and run the Monte Carlo program.\n"); printf("Yields name_F.bin, which holds the fluence rate distribution.\n"); return 0; } mcconfig cfg; mcxyzn_init(&cfg,argc,argv); mcxyzn_launchsimulation(&cfg); return 0; } /* end of main */ void mcxyzn_init(mcconfig *cfg,int argc, const char * argv[]) { long int i; /* Input/Output */ strcpy(cfg->myname, argv[1]); // acquire name from argument of function call by user. 
printf("name = %s\n",cfg->myname); /**** INPUT FILES *****/ /* IMPORT myname_H.mci */ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_H.mci"); cfg->fid = fopen(cfg->filename,"r"); fgets(cfg->buf, 32, cfg->fid); // run parameters sscanf(cfg->buf, "%f", &cfg->time_min); // desired time duration of run [min] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nx); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Ny); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nz); // # of bins fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dx); // size of bins [cm] fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dy); // size of bins [cm] fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->dz); // size of bins [cm] // launch parameters fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->mcflag); // mcflag, 0 = uniform, 1 = Gaussian, 2 = iso-pt fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->launchflag); // launchflag, 0 = ignore, 1 = manually set fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->boundaryflag); // 0 = no boundaries, 1 = escape at all boundaries, 2 = escape at surface only fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->gradientflag); fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->xs); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->ys); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->zs); // initial launch point fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->xfocus); // xfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->yfocus); // yfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->zfocus); // zfocus fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->ux0); // ux trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->uy0); // uy trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->uz0); // uz trajectory fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->radius); // radius fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%f", &cfg->waist); // waist // tissue optical properties fgets(cfg->buf, 32,cfg->fid); sscanf(cfg->buf, "%d", &cfg->Nt); // # of tissue types in tissue list for (i=1; i<=cfg->Nt; i++) { fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->muav[i]); // absorption coeff [cm^-1] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->musv[i]); // scattering coeff [cm^-1] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->gv[i]); // anisotropy of scatter [dimensionless] fgets(cfg->buf, 32, cfg->fid); sscanf(cfg->buf, "%f", &cfg->nv[i]); } fclose(cfg->fid); printf("time_min = %0.2f min\n",cfg->time_min); printf("Nx = %d, dx = %0.4f [cm]\n",cfg->Nx,cfg->dx); printf("Ny = %d, dy = %0.4f [cm]\n",cfg->Ny,cfg->dy); printf("Nz = %d, dz = %0.4f [cm]\n",cfg->Nz,cfg->dz); printf("xs = %0.4f [cm]\n",cfg->xs); printf("ys = %0.4f [cm]\n",cfg->ys); printf("zs = %0.4f [cm]\n",cfg->zs); printf("mcflag = %d\n",cfg->mcflag); if (cfg->mcflag==0) printf("launching uniform flat-field beam\n"); if (cfg->mcflag==1) printf("launching Gaussian beam\n"); if (cfg->mcflag==2) printf("launching isotropic point source\n"); if (cfg->mcflag==3) printf("launching square source\n"); printf("xfocus = %0.4f [cm]\n",cfg->xfocus); printf("yfocus = %0.4f [cm]\n",cfg->yfocus); printf("zfocus = %0.2e [cm]\n",cfg->zfocus); if (cfg->launchflag==1) { printf("Launchflag ON, so launch the following:\n"); 
printf("ux0 = %0.4f [cm]\n",cfg->ux0); printf("uy0 = %0.4f [cm]\n",cfg->uy0); printf("uz0 = %0.4f [cm]\n",cfg->uz0); } else { printf("Launchflag OFF, so program calculates launch angles.\n"); printf("radius = %0.4f [cm]\n",cfg->radius); printf("waist = %0.4f [cm]\n",cfg->waist); } if (cfg->boundaryflag==0) printf("boundaryflag = 0, so no boundaries.\n"); else if (cfg->boundaryflag==1) printf("boundaryflag = 1, so escape at all boundaries.\n"); else if (cfg->boundaryflag==2) printf("boundaryflag = 2, so escape at surface only.\n"); else{ printf("improper boundaryflag. quit.\n"); //return 0; } printf("# of tissues available, Nt = %d\n",cfg->Nt); for (i=1; i<=cfg->Nt; i++) { printf("muav[%ld] = %0.4f [cm^-1]\n",i,cfg->muav[i]); printf("musv[%ld] = %0.4f [cm^-1]\n",i,cfg->musv[i]); printf(" gv[%ld] = %0.4f [--]\n",i,cfg->gv[i]); printf(" nv[%ld] = %0.4f [--]\n\n",i,cfg->nv[i]); } // SAVE optical properties, for later use by MATLAB. strcpy(cfg->filename,cfg->myname); strcat(cfg->filename,"_props.m"); cfg->fid = fopen(cfg->filename,"w"); for (i=1; i<=cfg->Nt; i++) { fprintf(cfg->fid,"muav(%ld) = %0.4f;\n",i,cfg->muav[i]); fprintf(cfg->fid,"musv(%ld) = %0.4f;\n",i,cfg->musv[i]); fprintf(cfg->fid,"gv(%ld) = %0.4f;\n",i,cfg->gv[i]); fprintf(cfg->fid,"nv(%ld) = %0.4f;\n\n",i,cfg->nv[i]); } fclose(cfg->fid); /* IMPORT BINARY TISSUE FILE */ cfg->NN = cfg->Nx*cfg->Ny*cfg->Nz; return; } void mcxyzn_launchsimulation(mcconfig *cfg) { unsigned char *v; float *F; float4 *g; //float *R_host; /* Initializing storing elements */ v = (unsigned char *)malloc(cfg->NN*sizeof(unsigned char)); /* tissue structure */ F = (float *)malloc(cfg->NN*sizeof(float)); /* relative fluence rate [W/cm^2/W.delivered] */ g = (float4 *)malloc(cfg->NN*sizeof(float4)); //R = (float *)malloc(cfg->Nx*cfg->Ny*sizeof(cl_mem)); /* escaping flux [W/cm^2/W.delivered] */ //for (i=0; i<Ny*Nx; i++) R[i] = 0; //prop_host = (float4 *)malloc(cfg->NN*sizeof(float4)); //cl_float *R = (cl_float *)(R_host); /* read binary file */ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_T.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(v, sizeof(unsigned char), cfg->NN, cfg->fid); fclose(cfg->fid); if (cfg->gradientflag > 0){ strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gx.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].x = F[j]; // ensure F[] starts empty. } strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gy.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].y = F[j]; // ensure F[] starts empty. } strcpy(cfg->filename,cfg->myname); strcat(cfg->filename, "_Gz.bin"); cfg->fid = fopen(cfg->filename, "rb"); fread(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); for(int j=0; j<cfg->NN;j++) { g[j].z = F[j]; // ensure F[] starts empty. } } for(int j=0; j<cfg->NN;j++) { F[j] = 0.f; // ensure F[] starts empty. 
} /* Show tissue on screen, along central z-axis, by listing tissue type #'s.*/ printf("central axial profile of tissue types:\n"); for (int iz=0; iz<cfg->Nz; iz++) { int i = (long)(iz*cfg->Ny*cfg->Nx + (cfg->Ny/2)*cfg->Nx + cfg->Nx/2); printf("%d",v[i]); } printf("\n\n"); /************************************* * == Setting up OpenMP structure == * *************************************/ /* Number of photons launched */ cfg->Nphotons = 50000; int nb_threads = omp_get_max_threads(); /* Create seeds for threads */ int *seed; seed = (int *)malloc(nb_threads*sizeof(int)*2); for (int i=0;i<nb_threads;i++){ seed[i*2]= rand(); seed[i*2+1] = rand(); } printf("Creating random seeds (2 seeds per thread): %i \n",(int)nb_threads*2); /******************************************************************* * ============================ MAJOR CYCLE ======================== *******************************************************************/ cfg->start_time = clock(); cfg->now = time(NULL); printf("\n%s\n", ctime(&cfg->now)); /* Launch main kernel */ printf("[====== Main kernel ======]\n"); printf("Launching %i photons with %i threads. \n",(int)cfg->Nphotons,(int)nb_threads); double start,start2; double end,end2; start = omp_get_wtime(); mcxyzn_kernel(cfg,v,F,seed,g,cfg->Nphotons); end = omp_get_wtime(); printf("Test kernel of %i photons took %f sec.\n",(int) cfg->Nphotons, end-start); cfg->Nphotons = ceil(cfg->Nphotons/(end-start)*cfg->time_min*60)- cfg->Nphotons; printf("Launching remaining %i photons.\n", (int) cfg->Nphotons); start2 = omp_get_wtime(); mcxyzn_kernel(cfg,v,F,seed,g,cfg->Nphotons); end2 = omp_get_wtime(); printf("Main kernel took %f sec.\n",end2-start2); printf("Total running time of %f sec for %i photons. \n",end+end2-start-start2,(int)cfg->Nphotons+50000); /* printf("------------------------------------------------------\n"); */ /* printf("Elapsed Time for %i photons = %f sec\n",(int)cfg->Nphotons,(float)(end-start)/(1e9)); */ /* printf("%i photons per minute\n", (int) (cfg->Nphotons/(end-start)*(1e9)*60)); */ /* printf("------------------------------------------------------\n"); */ /************** * == Save == * **************/ // Normalize deposition (A) to yield fluence rate (F). float temp = cfg->dx*cfg->dy*cfg->dz*(cfg->Nphotons); for (int i=0; i<cfg->NN;i++){ F[i] = (F[i]/(temp*cfg->muav[v[i]])); } // Save the binary file strcpy(cfg->filename,cfg->myname); strcat(cfg->filename,"_F.bin"); printf("saving %s\n",cfg->filename); cfg->fid = fopen(cfg->filename, "wb"); /* 3D voxel output */ fwrite(F, sizeof(float), cfg->NN, cfg->fid); fclose(cfg->fid); /* save reflectance */ /*float temp = cfg->dx*cfg->dy*(cfg->Nphotons+50000); for (int i=0; i<cfg->Nx*cfg->Ny;i++){ R[i] = (F[i]/(temp)); } strcpy(filename,myname); strcat(filename,"_Ryx.bin"); printf("saving %s\n",filename); fid = fopen(filename, "wb"); /* 2D voxel output */ /*fwrite(R, sizeof(float), cfg->Ny*cfg->Nx, fid); fclose(fid); printf("%s is done.\n",myname);*/ printf("------------------------------------------------------\n"); cfg->now = time(NULL); printf("%s\n", ctime(&cfg->now)); free(F); free(v); free(g); //free(R_host); return; } /* If 1+cos(theta) <= ONE_MINUS_COSZERO, fabs(PI-theta) <= 1e-6 rad. 
*/ /* SUBROUTINES */ static inline unsigned long rotl(const unsigned long x, int k) { return (x << k) | (x >> (64 - k)); } /********************* * RandomGen * *********************/ float RandomGen(unsigned long* s) { union { unsigned long i; unsigned int u[2]; float f[2]; } result; result.i = s[0] + s[1]; s[1] ^= s[0]; s[0] = rotl(s[0], 24) ^ s[1] ^ (s[1] << 16); // a, b s[1] = rotl(s[1], 37); // c result.u[0] = 0x3F800000U | (result.u[0] >> 9); return result.f[0] - 1.f; } /************* SET SOURCE*************** * Launch collimated beam at x,y center. ***************************************/ void LaunchPhoton(mcconfig* cfg, float4* pos, float4* u, float* rnd, unsigned long* seed) { float r, phi, temp; /****************************/ /* Initial position. */ /* trajectory */ if (cfg->launchflag == 1) { // manually set launch pos->x = cfg->xs; pos->y = cfg->ys; pos->z = cfg->zs; u->x = cfg->ux0; u->y = cfg->uy0; u->z = cfg->uz0; } else { // use mcflag if (cfg->mcflag == 0) { // uniform beam //set launch point and width of beam while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 r = cfg->radius * sqrt(*rnd); // radius of beam at launch point while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 phi = (*rnd) * 2.0 * PI; pos->x = cfg->xs + r * cos(phi); pos->y = cfg->ys + r * sin(phi); pos->z = cfg->zs; // set trajectory toward focus while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 r = cfg->waist * sqrt(*rnd); // radius of beam at focus while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 phi = (*rnd) * 2.0 * PI; float xfocus = cfg->xs + r * cos(phi); //SLJ add cfg->xs float yfocus = cfg->ys + r * sin(phi); //SLJ add cfg->ys temp = 1 / sqrt((pos->x - xfocus) * (pos->x - xfocus) + (pos->y - yfocus) * (pos->y - yfocus) + (pos->z - cfg->zfocus) * (pos->z - cfg->zfocus)); u->x = -(pos->x - xfocus) * temp; u->y = -(pos->y - yfocus) * temp; u->z = sqrt(1 - u->x * u->x - u->y * u->y); } else if (cfg->mcflag == 2) { // isotropic pt source float ctheta = 1.0 - 2.0 * RandomGen(seed); float stheta = sqrt(1.0 - ctheta * ctheta); float psi = 2.0 * PI * RandomGen(seed); float cpsi = cos(psi); float spsi; if (psi < PI) spsi = sqrt(1.0 - cpsi * cpsi); else spsi = -sqrt(1.0 - cpsi * cpsi); pos->x = cfg->xs; pos->y = cfg->ys; pos->z = cfg->zs; u->x = stheta * cpsi; u->y = stheta * spsi; u->z = ctheta; } else if (cfg->mcflag == 3) { // rectangular source collimated while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 pos->x = cfg->radius * ((*rnd) * 2 - 1); // use radius to specify x-halfwidth of rectangle while ((*rnd = RandomGen(seed)) <= 0.0); // avoids rnd = 0 pos->y = cfg->radius * ((*rnd) * 2 - 1); // use radius to specify y-halfwidth of rectangle pos->z = cfg->zs; u->x = 0.0; u->y = 0.0; u->z = 1.0; // collimated beam } } // end use mcflag pos->x = cfg->Nx / 2 + pos->x / cfg->dx; pos->y = cfg->Ny / 2 + pos->y / cfg->dy; pos->z = pos->z / cfg->dz; /****************************/ } /*********************************************************** * Determine if the two position are located in the same voxel * Returns 1 if same voxel, 0 if not same voxel. 
****/ int SameVoxel(mcconfig* cfg, float x1, float y1, float z1, float x2, float y2, float z2) { float xmin = fmin((floor)(x1), (floor)(x2)); float ymin = fmin((floor)(y1), (floor)(y2)); float zmin = fmin((floor)(z1), (floor)(z2)); float xmax = xmin + 1; float ymax = ymin + 1; float zmax = zmin + 1; return ((x1 <= xmax && x2 <= xmax && y1 <= ymax && y2 <= ymax && z1 < zmax && z2 <= zmax)); } /******************** * my version of FindVoxelFace for no scattering. * s = ls + FindVoxelFace2(x,y,z, tempx, tempy, tempz, dx, dy, dz, ux, uy, uz); ****/ float FindVoxelFace2(mcconfig* cfg, float x1, float y1, float z1, float x2, float y2, float z2, float ux, float uy, float uz) { int ix1 = floor(x1); int iy1 = floor(y1); int iz1 = floor(z1); int ix2, iy2, iz2; if (ux >= 0) ix2 = ix1 + 1; else ix2 = ix1; if (uy >= 0) iy2 = iy1 + 1; else iy2 = iy1; if (uz >= 0) iz2 = iz1 + 1; else iz2 = iz1; float xs = fabs((ix2 - x1) / ux); float ys = fabs((iy2 - y1) / uy); float zs = fabs((iz2 - z1) / uz); float s = fmin(xs, fmin(ys, zs)); return (s * cfg->dx); } /*********************************************************** * FRESNEL REFLECTANCE * Computes reflectance as photon passes from medium 1 to * medium 2 with refractive indices n1,n2. Incident * angle a1 is specified by cosine value ca1 = cos(a1). * Program returns value of transmitted angle a1 as * value in *ca2_Ptr = cos(a2). ****/ float RFresnel(float4* u, float4* g, float4* gb, float n1, float n2, unsigned long* seed, int* TIR_events, float* status) { if (n1 == n2) { return 1.0; } else { if ((g->x * g->x + g->y * g->y + g->z * g->z) == 0) { *g = *gb; } float rand = RandomGen(seed); float cos_i = -(u->x) * g->x - (u->y) * g->y - (u->z) * g->z; if (cos_i > 0.99999) { float r = (n2 - n1) / (n2 + n1); r *= r; if (rand > r) { //u->x = -g->x, u->y = -g->y, u->z = -g->z; return 1.0; } else { u->x = -u->x, u->y = -u->y, u->z = -u->z; return 0.0; } } else if (cos_i < 1e-5) { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; return 0.0; } else { float sin_t2 = pow(n1 / n2, 2) * (1 - cos_i * cos_i); if (sin_t2 >= 1.0) { if (*TIR_events < MAX_TIR) { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; (*TIR_events)++; return 0.0; } else { *status = DEAD; u->w = 0.0; return 1.0; } } else { float cos_t = sqrt(1.0 - sin_t2); float temp1 = n1 * cos_i; float temp2 = n2 * cos_t; temp1 = (temp1 - temp2) / (temp1 + temp2); float r = 0.5 * temp1 * temp1; temp1 = n2 * cos_i; temp2 = n1 * cos_t; temp1 = (temp1 - temp2) / (temp1 + temp2); r += 0.5 * temp1 * temp1; if (rand > r) { temp1 = n1 / n2; temp2 = temp1 * cos_i - cos_t; u->x = temp1 * (u->x) + temp2 * g->x; u->y = temp1 * (u->y) + temp2 * g->y; u->z = temp1 * (u->z) + temp2 * g->z; return 1.0; } else { u->x = u->x + 2 * cos_i * g->x; u->y = u->y + 2 * cos_i * g->y; u->z = u->z + 2 * cos_i * g->z; return 0.0; } } } } } /******** END SUBROUTINE **********/ int getindex(mcconfig* cfg, int x, int y, int z) { return z * cfg->Ny * cfg->Nx + x * cfg->Ny + y; } void InterpGradient(mcconfig* cfg, float4* g, unsigned char* v, float4* pos, float4* n, unsigned char tissue) { if (pos->x >= cfg->Nx - 0.5) { pos->x = cfg->Nx - 0.51; } if (pos->y >= cfg->Ny - 0.5) { pos->y = cfg->Ny - 0.51; } if (pos->z >= cfg->Nz - 0.5) { pos->z = cfg->Nz - 0.51; } if (pos->x < 0.5) { pos->x = 0.51; } if (pos->y < 0.5) { pos->y = 0.51; } if (pos->z < 0.5) { pos->z = 0.51; } float x = round(pos->x); float y = round(pos->y); float z = round(pos->z); float xd = pos->x 
- x + 0.5; float yd = pos->y - y + 0.5; float zd = pos->z - z + 0.5; float v000, v001, v010, v011, v100, v101, v110, v111; v000 = (v[getindex(cfg, x - 1, y - 1, z - 1)] == tissue); v001 = (v[getindex(cfg, x - 1, y - 1, z)] == tissue); v010 = (v[getindex(cfg, x - 1, y, z - 1)] == tissue); v011 = (v[getindex(cfg, x - 1, y, z)] == tissue); v100 = (v[getindex(cfg, x, y - 1, z - 1)] == tissue); v101 = (v[getindex(cfg, x, y - 1, z)] == tissue); v110 = (v[getindex(cfg, x, y, z - 1)] == tissue); v111 = (v[getindex(cfg, x, y, z)] == tissue); float c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].x * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].x * v100; float c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].x * v001 + xd * g[getindex(cfg, x, y - 1, z)].x * v101; float c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].x * v010 + xd * g[getindex(cfg, x, y, z - 1)].x * v110; float c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].x * v011 + xd * g[getindex(cfg, x, y, z)].x * v111; float c0 = (1 - yd) * c00 + yd * c10; float c1 = (1 - yd) * c01 + yd * c11; n->x = c0 * (1 - zd) + c1 * zd; c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].y * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].y * v100; c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].y * v001 + xd * g[getindex(cfg, x, y - 1, z)].y * v101; c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].y * v010 + xd * g[getindex(cfg, x, y, z - 1)].y * v110; c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].y * v011 + xd * g[getindex(cfg, x, y, z)].y * v111; c0 = (1 - yd) * c00 + yd * c10; c1 = (1 - yd) * c01 + yd * c11; n->y = c0 * (1 - zd) + c1 * zd; c00 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z - 1)].z * v000 + xd * g[getindex(cfg, x, y - 1, z - 1)].z * v100; c01 = (1 - xd) * g[getindex(cfg, x - 1, y - 1, z)].z * v001 + xd * g[getindex(cfg, x, y - 1, z)].z * v101; c10 = (1 - xd) * g[getindex(cfg, x - 1, y, z - 1)].z * v010 + xd * g[getindex(cfg, x, y, z - 1)].z * v110; c11 = (1 - xd) * g[getindex(cfg, x - 1, y, z)].z * v011 + xd * g[getindex(cfg, x, y, z)].z * v111; c0 = (1 - yd) * c00 + yd * c10; c1 = (1 - yd) * c01 + yd * c11; n->z = c0 * (1 - zd) + c1 * zd; float magn = sqrt(n->x * n->x + n->y * n->y + n->z * n->z); n->x = n->x / magn; n->y = n->y / magn; n->z = n->z / magn; return; } void mcxyzn_kernel(mcconfig* cfg, unsigned char* v, float* F, int* dseed, float4* g, const int Nphotons) { #pragma omp parallel { int idx = omp_get_thread_num(); unsigned long seed[2]; seed[0] = dseed[idx * 2]; seed[1] = dseed[idx * 2 + 1]; #pragma omp for for (int k = 0; k < Nphotons; k++) { //if (idx == 0) //printf("Thread %i photon %i/%i Rand %f %i %i\n",idx,k+1,Nphotons,RandomGen(seed),seed[0],seed[1]); /**** LAUNCH Initialize photon position and trajectory. *****/ //if (fmod(i_photon,10)==0) printf("photon %ld took %d steps\n",i_photon,CNT); float rnd; /* assigned random value 0-1 */ /* dummy values */ float temp; /* dummy variable */ int ix, iy, iz; /* Added. Used to track photons */ float4 pos; /* photon position .w = weight */ float4 u; /* photon trajectory .w = sleft */ unsigned char type; /* absorption coef [cm^-1] scattering coef [cm^-1] anisotropy [-] refractivity index [-] */ pos.w = 1.0; /* set photon weight to one */ float status = ALIVE; /* Launch an ALIVE photon */ int TIR_events = 0; LaunchPhoton(cfg, &pos, &u, &rnd, seed); /* Get tissue voxel properties of launchpoint. * If photon beyond outer edge of defined voxels, * the tissue equals properties of outermost voxels. * Therefore, set outermost voxels to infinite background value. 
*/ ix = (int)(pos.x); iy = (int)(pos.y); iz = (int)(pos.z); if (ix >= cfg->Nx) ix = cfg->Nx - 1; if (iy >= cfg->Ny) iy = cfg->Ny - 1; if (iz >= cfg->Nz) iz = cfg->Nz - 1; if (ix < 0) ix = 0; if (iy < 0) iy = 0; if (iz < 0) iz = 0; /* Get the tissue type of located voxel */ int i = (int)(iz * cfg->Ny * cfg->Nx + ix * cfg->Ny + iy);//(iz*cfg->Ny*cfg->Nx + ix*cfg->Ny + iy); type = v[i]; int bflag = 1; /* HOP_DROP_SPIN_CHECK Propagate one photon until it dies as determined by ROULETTE. *******/ do { /**** HOP Take step to new position s = dimensionless stepsize x, uy, uz are cosines of current photon trajectory *****/ while ((rnd = RandomGen(seed)) <= ls); /* yields 0 < rnd <= 1 */ u.w = -log(rnd); /* dimensionless step */ do { // while sleft>0 float s = u.w / cfg->musv[type]; /* Step size [cm].*/ float tempx = pos.x + s * u.x / cfg->dx; /* Update positions. [cm] */ float tempy = pos.y + s * u.y / cfg->dx; float tempz = pos.z + s * u.z / cfg->dx; if (SameVoxel(cfg, pos.x, pos.y, pos.z, tempx, tempy, tempz)) /* photon in same voxel */ { pos.x = tempx; /* Update positions. */ pos.y = tempy; pos.z = tempz; /**** DROP Drop photon weight (W) into local bin. *****/ float absorb = pos.w * (1 - exp(-cfg->muav[type] * s)); /* photon weight absorbed at this step */ if (absorb != absorb) { status = DEAD; u.w = 0; } else { //atomicadd(&(F[i]),absorb); if (bflag) { #pragma omp atomic F[i] += absorb; } pos.w -= absorb; /* decrement WEIGHT by amount absorbed */ } // If photon within volume of heterogeneity, deposit energy in F[]. // Normalize F[] later, when save output. /* Update sleft */ u.w = 0; /* dimensionless step remaining */ } else /* photon has crossed voxel boundary */ { /* step to voxel face + "littlest step" so just inside new voxel. */ s = ls + FindVoxelFace2(cfg, pos.x, pos.y, pos.z, tempx, tempy, tempz, u.x, u.y, u.z); float temp_px, temp_py, temp_pz; /* Update positions. */ temp_px = pos.x + s * u.x / cfg->dx; temp_py = pos.y + s * u.y / cfg->dx; temp_pz = pos.z + s * u.z / cfg->dx; /**** DROP Drop photon weight (W) into local bin. *****/ float absorb = pos.w * (1 - exp(-cfg->muav[type] * s)); /* photon weight absorbed at this step */ if (absorb != absorb) { status = DEAD; u.w = 0; } else { //atomicadd(&(F[i]),absorb); if (bflag) { #pragma omp atomic F[i] += absorb; } pos.w -= absorb; /* decrement WEIGHT by amount absorbed */ } /* Update sleft */ u.w -= s * cfg->musv[type]; /* dimensionless step remaining */ if (u.w <= ls) u.w = 0; int temp_ix = (int)floor(temp_px); int temp_iy = (int)floor(temp_py); int temp_iz = (int)floor(temp_pz); bflag = 1; //boundary flag. 
Initialize as 1 = inside volume, then check. if (cfg->boundaryflag == 0) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; bflag = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; bflag = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; bflag = 0; } if (temp_iz < 0) { temp_iz = 0; bflag = 0; } if (temp_ix < 0) { temp_ix = 0; bflag = 0; } if (temp_iy < 0) { temp_iy = 0; bflag = 0; } } else if (cfg->boundaryflag == 1) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; status = DEAD; u.w = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; status = DEAD; u.w = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; status = DEAD; u.w = 0; } if (temp_iz < 0) { temp_iz = 0; status = DEAD; u.w = 0; } if (temp_ix < 0) { temp_ix = 0; status = DEAD; u.w = 0; } if (temp_iy < 0) { temp_iy = 0; status = DEAD; u.w = 0; } } else if (cfg->boundaryflag == 2) { if (temp_iz >= cfg->Nz) { temp_iz = cfg->Nz - 1; bflag = 0; } if (temp_ix >= cfg->Nx) { temp_ix = cfg->Nx - 1; bflag = 0; } if (temp_iy >= cfg->Ny) { temp_iy = cfg->Ny - 1; bflag = 0; } if (temp_iz < 0) { temp_iz = 0; status = DEAD; u.w = 0; } if (temp_ix < 0) { temp_ix = 0; bflag = 0; } if (temp_iy < 0) { temp_iy = 0; bflag = 0; } } int p = (int)(temp_iz * cfg->Ny * cfg->Nx + temp_ix * cfg->Ny + temp_iy); int fstatus = 1; float4 ga = g[i]; float4 gb = { ix - temp_ix,iy - temp_iy,iz - temp_iz,0.0 }; if (cfg->gradientflag == 2) { InterpGradient(cfg, g, v, &pos, &ga, v[i]); fstatus = RFresnel(&u, &ga, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } else if (cfg->gradientflag == 1) { fstatus = RFresnel(&u, &ga, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } else if (cfg->gradientflag == 0) { fstatus = RFresnel(&u, &gb, &gb, cfg->nv[type], cfg->nv[(int)v[p]], seed, &TIR_events, &status); } if (fstatus == 0) { pos.x = temp_px + (pos.x - temp_px) * ls * 2; pos.y = temp_py + (pos.y - temp_py) * ls * 2; pos.z = temp_pz + (pos.z - temp_pz) * ls * 2; } else { ix = temp_ix; iy = temp_iy; iz = temp_iz; // update pointer to tissue type type = v[p]; pos.x = temp_px; pos.y = temp_py; pos.z = temp_pz; i = p; } } //(sv) /* same voxel */ } while (u.w > 0.f); /**** SPIN Scatter photon into new trajectory defined by theta and psi. Theta is specified by cos(theta), which is determined based on the Henyey-Greenstein scattering function. Convert theta and psi into cosines ux, uy, uz. *****/ /* Sample for costheta */ rnd = RandomGen(seed); float ctheta, stheta, psi, cpsi, spsi; if (cfg->gv[type] == 0.0) ctheta = 2.0 * rnd - 1.0; else { temp = (1.0 - cfg->gv[type] * cfg->gv[type]) / (1.0 - cfg->gv[type] + 2 * cfg->gv[type] * rnd); ctheta = (1.0 + cfg->gv[type] * cfg->gv[type] - temp * temp) / (2.0 * cfg->gv[type]); } stheta = sqrt(1.0 - ctheta * ctheta); /* sqrt() is faster than sin(). */ /* Sample psi. */ psi = 2.0 * PI * RandomGen(seed); cpsi = cos(psi); if (psi < PI) spsi = sqrt(1.0 - cpsi * cpsi); /* sqrt() is faster than sin(). */ else spsi = -sqrt(1.0 - cpsi * cpsi); /* New trajectory. */ if (1 - fabs(u.z) <= ls) { /* close to perpendicular. */ u.x = stheta * cpsi; u.y = stheta * spsi; u.z = ctheta * SIGN(u.z); /* SIGN() is faster than division. 
*/ } else { /* usually use this option */ temp = sqrt(1.0 - u.z * u.z); float ux, uy, uz; ux = stheta * (u.x * u.z * cpsi - u.y * spsi) / temp + u.x * ctheta; uy = stheta * (u.y * u.z * cpsi + u.x * spsi) / temp + u.y * ctheta; uz = -stheta * cpsi * temp + u.z * ctheta; u.x = ux; u.y = uy; u.z = uz; } /**** CHECK ROULETTE If photon weight below THRESHOLD, then terminate photon using Roulette technique. Photon has CHANCE probability of having its weight increased by factor of 1/CHANCE, and 1-CHANCE probability of terminating. *****/ if (pos.w < THRESHOLD) { if (RandomGen(seed) * CHANCE <= 1.f) pos.w *= CHANCE; else status = DEAD; } } while (status == ALIVE); /* end STEP_CHECK_HOP_SPIN */ /* if ALIVE, continue propagating */ /* If photon DEAD, then launch new photon. */ } //#pragma omp for } //#pragma omp parallel return; }
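/*********************************************************** * A minimal standalone sketch of the parallel pattern used in mcxyzn_kernel * above: one xoroshiro128+ state pair per thread (seeded on the host like * dseed), the photon loop split with "#pragma omp for", and deposits into * the shared array protected with "#pragma omp atomic" so concurrent * F[i] += absorb updates are not lost. NBINS and the histogram are * illustrative stand-ins for the fluence grid; like RandomGen() above, this * assumes a 64-bit unsigned long (LP64). ***********************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define NBINS 64

static unsigned long rotl64(const unsigned long x, int k)
{
    return (x << k) | (x >> (64 - k));
}

/* same update rule as RandomGen() above, returning the raw 64-bit draw */
static unsigned long xoroshiro_next(unsigned long* s)
{
    unsigned long result = s[0] + s[1];
    s[1] ^= s[0];
    s[0] = rotl64(s[0], 24) ^ s[1] ^ (s[1] << 16);
    s[1] = rotl64(s[1], 37);
    return result;
}

int main(void)
{
    float F[NBINS] = { 0 };                     /* shared deposit array, like F[] */
    int nthreads = omp_get_max_threads();
    unsigned long* dseed = (unsigned long*)malloc(2 * nthreads * sizeof(unsigned long));
    for (int t = 0; t < 2 * nthreads; t++)
        dseed[t] = (unsigned long)rand() + 1;   /* nonzero per-thread seeds */
    #pragma omp parallel
    {
        int idx = omp_get_thread_num();
        unsigned long seed[2] = { dseed[idx * 2], dseed[idx * 2 + 1] };
        #pragma omp for
        for (int k = 0; k < 1000000; k++)
        {
            int bin = (int)(xoroshiro_next(seed) % NBINS);
            #pragma omp atomic
            F[bin] += 1.f;                      /* protected, like F[i] += absorb */
        }
    }
    printf("F[0] = %g (expect about %g)\n", F[0], 1000000.f / NBINS);
    free(dseed);
    return 0;
}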
GB_binop__isge_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64) // A*D function (colscale): GB (_AxD__isge_uint64) // D*A function (rowscale): GB (_DxB__isge_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64) // C=scalar+B GB (_bind1st__isge_uint64) // C=scalar+B' GB (_bind1st_tran__isge_uint64) // C=A+scalar GB (_bind2nd__isge_uint64) // C=A'+scalar GB (_bind2nd_tran__isge_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
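/*********************************************************** * A simplified sketch of what GB (_bind2nd__isge_uint64) above reduces to * for a dense, non-iso input (Ab == NULL, A_iso == false), once the GBB and * GBX macros resolve to plain array reads: a flat OpenMP loop applying the * ISGE operator cij = (aij >= y), which yields 0 or 1 in uint64_t. The real * generated kernel also covers bitmap and iso-valued inputs through those * macros; this standalone version is for illustration only, and the function * name below is hypothetical. ***********************************************************/
#include <stdio.h>
#include <stdint.h>

static void bind2nd_isge_uint64_dense(uint64_t* Cx, const uint64_t* Ax,
                                      uint64_t y, int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        Cx[p] = (Ax[p] >= y);   /* GB_BINOP: z = (x >= y) */
    }
}

int main(void)
{
    uint64_t Ax[5] = { 1, 5, 7, 3, 9 };
    uint64_t Cx[5];
    bind2nd_isge_uint64_dense(Cx, Ax, 5, 5, 2);
    for (int i = 0; i < 5; i++)
        printf("%llu ", (unsigned long long)Cx[i]);   /* prints 0 1 1 0 1 */
    printf("\n");
    return 0;
}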
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64) // A*D function (colscale): GB (_AxD__isge_uint64) // D*A function (rowscale): GB (_DxB__isge_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64) // C=scalar+B GB (_bind1st__isge_uint64) // C=scalar+B' GB (_bind1st_tran__isge_uint64) // C=A+scalar GB (_bind2nd__isge_uint64) // C=A'+scalar GB (_bind2nd_tran__isge_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
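The bind1st kernel in the serial rendering above is easier to see without the GraphBLAS macro layer. A minimal standalone sketch, assuming a plain dense array in place of the GBB/GBX bitmap and iso-value indirections (the function name and signature are illustrative, not part of the generated API):

#include <stdint.h>
#include <stddef.h>

/* Stand-in for GB (_bind1st__isge_uint64): Cx [p] = (x >= Bx [p]).
   ISGE yields 1 or 0, stored here as uint64_t to match GB_CTYPE. */
static void bind1st_isge_uint64 (uint64_t *Cx, uint64_t x,
                                 const uint64_t *Bx, size_t bnz)
{
    for (size_t p = 0 ; p < bnz ; p++)
        Cx [p] = (x >= Bx [p]) ;
}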
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64) // A*D function (colscale): GB (_AxD__isge_uint64) // D*A function (rowscale): GB (_DxB__isge_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64) // C=scalar+B GB (_bind1st__isge_uint64) // C=scalar+B' GB (_bind1st_tran__isge_uint64) // C=A+scalar GB (_bind2nd__isge_uint64) // C=A'+scalar GB (_bind2nd_tran__isge_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isge_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isge_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isge_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isge_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isge_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isge_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isge_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__isge_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__isge_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
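The only difference between this variant and the serial one above is the pragma placed before the bind1st/bind2nd loops. A minimal sketch of that pattern, under the same simplifying assumptions as the earlier standalone example: the iterations are independent, so a static schedule hands each thread one contiguous block of the index range with no synchronization or reduction needed.

#include <stdint.h>

/* Stand-in for GB (_bind2nd__isge_uint64) with the pragma the omp
   variant adds: num_threads(nthreads) caps the team size, and
   schedule(static) gives each thread a contiguous chunk of p. */
static void bind2nd_isge_uint64 (uint64_t *Cx, const uint64_t *Ax,
                                 uint64_t y, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
        Cx [p] = (Ax [p] >= y) ;
}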
lecuyer.h
/* * random number generator interface * L'Ecuyer, Mathematics of Computation, 65, pp 203-213 (96) */ /* Copyright (c) 2005, The Regents of the University of California. * All rights reserved. * This file is part of yorick (http://yorick.sourceforge.net). * Read the accompanying LICENSE file for details. */ /* __cplusplus is for version 2.0, c_plusplus for version 1.2 */ #ifdef __cplusplus extern "C" { #endif /* get random numbers between 0. and 1. one or n at a time */ extern double le_random(unsigned long *generator); extern void le_nrandom(unsigned long *generator, long n, double *r); /* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */ extern unsigned long le_next(unsigned long *generator); /* seed the sequence with either double or long * -- 0 seed means reinitialize to default sequence * -- note that the complete state of the generator requires * three numbers, not one, so "seeding" can't reproduce * an arbitrary state -- copy the generator to do that */ extern void le_rseed(unsigned long *generator, double seed); extern void le_iseed(unsigned long *generator, unsigned long seed); /* above can all take generator==0, in which case, they use this one * -- if you create your own generator, none of the three values * can be 0; best to call one of the seed routines to initialize */ extern unsigned long le_generator[3]; #ifdef _OPENMP #pragma omp declare target /* get random numbers between 0. and 1. one or n at a time */ extern double le_random_omp45(unsigned long *generator); extern void le_nrandom_omp45(unsigned long *generator, long n, double *r); /* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */ extern unsigned long le_next_omp45(unsigned long *generator); /* seed the sequence with either double or long * -- 0 seed means reinitialize to default sequence * -- note that the complete state of the generator requires * three numbers, not one, so "seeding" can't reproduce * an arbitrary state -- copy the generator to do that */ extern void le_rseed_omp45(unsigned long *generator, double seed); extern void le_iseed_omp45(unsigned long *generator, unsigned long seed); /* above can all take generator==0, in which case, they use this one * -- if you create your own generator, none of the three values * can be 0; best to call one of the seed routines to initialize */ extern unsigned long le_generator_omp45[3]; #pragma omp end declare target #endif #ifdef __cplusplus } #endif
/* __cplusplus is for version 2.0, c_plusplus for version 1.2 */ #ifdef __cplusplus extern "C" { #endif /* get random numbers between 0. and 1. one or n at a time */ extern double le_random(unsigned long *generator); extern void le_nrandom(unsigned long *generator, long n, double *r); /* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */ extern unsigned long le_next(unsigned long *generator); /* * seed the sequence with either double or long -- 0 seed means * reinitialize to default sequence -- note that the complete state of * the generator requires three numbers, not one, so "seeding" can't * reproduce an arbitrary state -- copy the generator to do that */ extern void le_rseed(unsigned long *generator, double seed); extern void le_iseed(unsigned long *generator, unsigned long seed); /* * above can all take generator==0, in which case, they use this one -- * if you create your own generator, none of the three values can be 0; * best to call one of the seed routines to initialize */ extern unsigned long le_generator[3]; #ifdef __cplusplus } #endif
/* __cplusplus is for version 2.0, c_plusplus for version 1.2 */ #ifdef __cplusplus extern "C" { #endif /* get random numbers between 0. and 1. one or n at a time */ extern double le_random(unsigned long *generator); extern void le_nrandom(unsigned long *generator, long n, double *r); /* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */ extern unsigned long le_next(unsigned long *generator); /* * seed the sequence with either double or long -- 0 seed means * reinitialize to default sequence -- note that the complete state of * the generator requires three numbers, not one, so "seeding" can't * reproduce an arbitrary state -- copy the generator to do that */ extern void le_rseed(unsigned long *generator, double seed); extern void le_iseed(unsigned long *generator, unsigned long seed); /* * above can all take generator==0, in which case, they use this one -- * if you create your own generator, none of the three values can be 0; * best to call one of the seed routines to initialize */ extern unsigned long le_generator[3]; #ifdef _OPENMP #pragma omp declare target /* get random numbers between 0. and 1. one or n at a time */ extern double le_random_omp45(unsigned long *generator); extern void le_nrandom_omp45(unsigned long *generator, long n, double *r); /* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */ extern unsigned long le_next_omp45(unsigned long *generator); /* * seed the sequence with either double or long -- 0 seed means * reinitialize to default sequence -- note that the complete state of * the generator requires three numbers, not one, so "seeding" can't * reproduce an arbitrary state -- copy the generator to do that */ extern void le_rseed_omp45(unsigned long *generator, double seed); extern void le_iseed_omp45(unsigned long *generator, unsigned long seed); /* * above can all take generator==0, in which case, they use this one -- * if you create your own generator, none of the three values can be 0; * best to call one of the seed routines to initialize */ extern unsigned long le_generator_omp45[3]; #pragma omp end declare target #endif #ifdef __cplusplus } #endif
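A short usage sketch for the interface declared above. The driver below is hypothetical and must be linked against the lecuyer implementation; passing generator==0 selects the built-in le_generator state, exactly as the header comments describe.

#include <stdio.h>
#include "lecuyer.h"

int main(void)
{
  double r[4];
  unsigned long raw;
  le_iseed(0, 12345UL);            /* reseed the default generator */
  le_nrandom(0, 4, r);             /* four uniforms in (0., 1.) */
  for (int i = 0; i < 4; i++)
    printf("%g\n", r[i]);
  raw = le_next(0);                /* raw integer in [1, 2^32-1] */
  printf("%lu\n", raw);
  return 0;
}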
pi.c
#include <stdio.h> long long num_passos = 1000000000; double passo; int main(){ int i; double x, pi, soma=0.0; passo = 1.0/(double)num_passos; #pragma omp target map(tofrom:soma) #pragma omp teams distribute parallel for simd private(x) reduction(+:soma) for(i=0; i < num_passos; i++){ x = (i + 0.5)*passo; soma = soma + 4.0/(1.0 + x*x); } pi = soma*passo; printf("The value of PI is: %f\n", pi); return 0; }
#include <stdio.h> long long num_passos = 1000000000; double passo; int main() { int i; double x, pi, soma = 0.0; passo = 1.0 / (double)num_passos; for (i = 0; i < num_passos; i++) { x = (i + 0.5) * passo; soma = soma + 4.0 / (1.0 + x * x); } pi = soma * passo; printf("The value of PI is: %f\n", pi); return 0; }
#include <stdio.h> long long num_passos = 1000000000; double passo; int main() { int i; double x, pi, soma = 0.0; passo = 1.0 / (double)num_passos; #pragma omp target map(tofrom:soma) #pragma omp teams distribute parallel for simd private(x) reduction(+:soma) for (i = 0; i < num_passos; i++) { x = (i + 0.5) * passo; soma = soma + 4.0 / (1.0 + x * x); } pi = soma * passo; printf("The value of PI is: %f\n", pi); return 0; }
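All three pi.c variants above evaluate the same midpoint-rule quadrature. The arithmetic they implement is:

\pi = \int_0^1 \frac{4}{1+x^2}\,dx \;\approx\; h \sum_{i=0}^{N-1} \frac{4}{1+x_i^2}, \qquad x_i = \left(i+\tfrac{1}{2}\right)h, \quad h = \frac{1}{N}, \quad N = 10^9

Here h is passo, the partial sums accumulate in soma (hence the reduction(+:soma) clause in the offloaded versions), and the final multiplication by passo recovers pi. The composite midpoint rule has O(h^2) error, so N = 10^9 gives far more accuracy than the %f format can display.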
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/property.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* Forward declarations. */ static MagickBooleanType TransformsRGBImage(Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential type of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ColorspaceType GetImageColorspaceType(const Image *image, ExceptionInfo *exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colorspace=image->colorspace; type=IdentifyImageType(image,exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleAlphaType)) colorspace=GRAYColorspace; return(colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + s R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % sRGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalize the range of the transformed values to % be [0..QuantumRange]. % % The format of the sRGBTransformImage method is: % % MagickBooleanType sRGBTransformImage(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % % o exception: return any errors or warnings in this structure. % */ static inline void ConvertRGBToCMY(const double red,const double green, const double blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const double red,const double green, const double blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLab(const double red,const double green, const double blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static void ConvertRGBToLuv(const double red,const double green, const double blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const double red,const double green, const double blue,double *low_x,double *low_y,double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); gamma=PerceptibleReciprocal(X+Y+Z); *low_x=gamma*X; *low_y=gamma*Y; *cap_Y=Y; } static void ConvertRGBToYDbDr(const double red,const double green, const double blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const double red,const double green, const double blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } static void ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); 
*Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(image,ClampToQuantum(GetPixelIntensity(image,q)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to target colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { 
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. */ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); 
SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; y_map[i].x=0.010566*i; z_map[i].x=0.002052*i; x_map[i].y=(-0.003296)*i; y_map[i].y=(-0.006471)*i; z_map[i].y=0.009768*i; x_map[i].z=0.009410*i; y_map[i].z=(-0.007880)*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_sRGBTransformImage) #endif proceed=SetImageProgress(image,sRGBTransformImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* Convert PseudoClass image. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red); image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green); image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageColorspace(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { ImageType type; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->colorspace == colorspace) return(MagickTrue); image->colorspace=colorspace; image->rendering_intent=UndefinedIntent; image->gamma=1.000/2.200; (void) memset(&image->chromaticity,0,sizeof(image->chromaticity)); type=image->type; if (IsGrayColorspace(colorspace) != MagickFalse) { if (colorspace == LinearGRAYColorspace) image->gamma=1.000; type=GrayscaleType; } else if ((IsRGBColorspace(colorspace) != MagickFalse) || (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) image->gamma=1.000; else { image->rendering_intent=PerceptualIntent; image->chromaticity.red_primary.x=0.6400; image->chromaticity.red_primary.y=0.3300; image->chromaticity.red_primary.z=0.0300; image->chromaticity.green_primary.x=0.3000; image->chromaticity.green_primary.y=0.6000; image->chromaticity.green_primary.z=0.1000; image->chromaticity.blue_primary.x=0.1500; image->chromaticity.blue_primary.y=0.0600; image->chromaticity.blue_primary.z=0.7900; image->chromaticity.white_point.x=0.3127; image->chromaticity.white_point.y=0.3290; image->chromaticity.white_point.z=0.3583; } status=SyncImagePixelCache(image,exception); image->type=type; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageGray() returns MagickTrue if all the pixels in the image 
have the % same red, green, and blue intensities and changes the type of the image to % bi-level or grayscale. % % The format of the SetImageGray method is: % % MagickBooleanType SetImageGray(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageGray(Image *image, ExceptionInfo *exception) { const char *value; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageGray(image)) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); value=GetImageProperty(image,"colorspace:auto-grayscale",exception); if (IsStringFalse(value) != MagickFalse) return(MagickFalse); type=IdentifyImageGray(image,exception); if (type == UndefinedType) return(MagickFalse); image->colorspace=GRAYColorspace; if (SyncImagePixelCache((Image *) image,exception) == MagickFalse) return(MagickFalse); image->type=type; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMonochrome() returns MagickTrue if all the pixels in the image have % the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange and changes the type of the image to bi-level. % % The format of the SetImageMonochrome method is: % % MagickBooleanType SetImageMonochrome(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMonochrome(Image *image, ExceptionInfo *exception) { const char *value; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); value=GetImageProperty(image,"colorspace:auto-grayscale",exception); if (IsStringFalse(value) != MagickFalse) return(MagickFalse); if (IdentifyImageMonochrome(image,exception) == MagickFalse) return(MagickFalse); image->colorspace=GRAYColorspace; if (SyncImagePixelCache((Image *) image,exception) == MagickFalse) return(MagickFalse); image->type=BilevelType; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImageColorspace() transforms an image colorspace, changing the % image data to reflect the new colorspace. % % The format of the TransformImageColorspace method is: % % MagickBooleanType TransformImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType TransformImageColorspace(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == colorspace) return(SetImageColorspace(image,colorspace,exception)); (void) DeleteImageProfile(image,"icc"); (void) DeleteImageProfile(image,"icm"); if (colorspace == LinearGRAYColorspace) return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod,exception)); if (colorspace == GRAYColorspace) return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod,exception)); if (colorspace == UndefinedColorspace) return(SetImageColorspace(image,colorspace,exception)); /* Convert the reference image from an alternate colorspace to sRGB. */ if (IssRGBColorspace(colorspace) != MagickFalse) return(TransformsRGBImage(image,exception)); status=MagickTrue; if (IssRGBColorspace(image->colorspace) == MagickFalse) status=TransformsRGBImage(image,exception); if (status == MagickFalse) return(status); /* Convert the reference image from sRGB to an alternate colorspace. */ if (sRGBTransformImage(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a n s f o r m s R G B I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformsRGBImage() converts the reference image from an alternate % colorspace to sRGB. The transformation matrices are not the standard ones: % the weights are rescaled to normalize the range of the transformed values % to be [0..QuantumRange]. % % The format of the TransformsRGBImage method is: % % MagickBooleanType TransformsRGBImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
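%
%  TransformsRGBImage() is the inverse of sRGBTransformImage() and is what
%  TransformImageColorspace() invokes when the source image is not already
%  sRGB, e.g.:
%
%      status=TransformsRGBImage(image,exception);
%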
% */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,double *red,double *green,double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 1.4019995886561440468*(Pr-0.5)); *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define 
TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 
0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 
0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 
0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 
0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
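      The gray sample is replicated into the red, green, and blue channels;
      EncodePixelGamma() is applied first only when the stored intensity is a
      linear (Rec601 or Rec709 luminance) method.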
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(image,q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
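      Each channel is rescaled to [0,1] with QuantumScale and dispatched to
      the matching Convert...ToRGB() helper; the helper hands back red,
      green, and blue already scaled to [0..QuantumRange].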
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
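      A log-encoded value v=1024*i/MaxMap decodes to the linear intensity
      (pow(10.0,(v-reference_white)*(gamma/density)*0.002/film_gamma)-black)/
      (1.0-black); the logmap table below precomputes this for every map
      slot before the pixel loop runs.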
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. 
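      Only the transfer function changes here: EncodePixelGamma() applies
      the sRGB encoding curve to each channel in place, so no lookup table
      is required for this case.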
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
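      Concretely, each chroma table entry below has the form
      0.5*coefficient*(2.0*i-MaxMap), which recenters the stored [0..MaxMap]
      value around zero before the matrix weight is applied.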
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
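      Each pixel becomes three table lookups plus adds per channel, e.g.
      pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x, followed by
      either the YCCMap gamma lookup or ScaleMapToQuantum().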
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformsRGBImage) #endif proceed=SetImageProgress(image,TransformsRGBImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. 
*/ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
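/*
  Illustrative sketch, not part of the original source: a minimal caller of
  the public entry point defined above.  The helper name and the round-trip
  choice of YCbCr are hypothetical; only TransformImageColorspace() and the
  ColorspaceType values come from this file.
*/
static MagickBooleanType RoundTripYCbCrExample(Image *image,
  ExceptionInfo *exception)
{
  /*
    TransformImageColorspace() re-encodes the pixel data; SetImageColorspace()
    alone would only relabel the colorspace member without touching pixels.
  */
  if (TransformImageColorspace(image,YCbCrColorspace,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Converting back exercises TransformsRGBImage() internally.
  */
  return(TransformImageColorspace(image,sRGBColorspace,exception));
}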
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/property.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* * Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* * Forward declarations. */ static MagickBooleanType TransformsRGBImage(Image *, ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential type of the image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ColorspaceType GetImageColorspaceType(const Image * image, ExceptionInfo * exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); colorspace = image->colorspace; type = IdentifyImageType(image, exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleAlphaType)) colorspace = GRAYColorspace; return (colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + s R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % sRGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalize the range of the transformed values to % be [0..QuantumRange]. % % The format of the sRGBTransformImage method is: % % MagickBooleanType sRGBTransformImage(Image *image, % const ColorspaceType colorspace,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. % % o exception: return any errors or warnings in this structure.
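%
%  For example (illustrative; the function is file-local and is normally
%  reached through TransformImageColorspace()):
%
%      status=sRGBTransformImage(image,YCbCrColorspace,exception);
%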
% */ static inline void ConvertRGBToCMY(const double red, const double green, const double blue, double *cyan, double *magenta, double *yellow) { *cyan = QuantumScale * (QuantumRange - red); *magenta = QuantumScale * (QuantumRange - green); *yellow = QuantumScale * (QuantumRange - blue); } static inline void ConvertXYZToLMS(const double x, const double y, const double z, double *L, double *M, double *S) { *L = 0.7328 * x + 0.4296 * y - 0.1624 * z; *M = (-0.7036 * x + 1.6975 * y + 0.0061 * z); *S = 0.0030 * x + 0.0136 * y + 0.9834 * z; } static void ConvertRGBToLMS(const double red, const double green, const double blue, double *L, double *M, double *S) { double X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); ConvertXYZToLMS(X, Y, Z, L, M, S); } static void ConvertRGBToLab(const double red, const double green, const double blue, double *L, double *a, double *b) { double X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); ConvertXYZToLab(X, Y, Z, L, a, b); } static void ConvertRGBToLuv(const double red, const double green, const double blue, double *L, double *u, double *v) { double X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); ConvertXYZToLuv(X, Y, Z, L, u, v); } static void ConvertRGBToxyY(const double red, const double green, const double blue, double *low_x, double *low_y, double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); gamma = PerceptibleReciprocal(X + Y + Z); *low_x = gamma * X; *low_y = gamma * Y; *cap_Y = Y; } static void ConvertRGBToYDbDr(const double red, const double green, const double blue, double *Y, double *Db, double *Dr) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *Db = QuantumScale * (-0.450 * red - 0.883 * green + 1.333 * blue) + 0.5; *Dr = QuantumScale * (-1.333 * red + 1.116 * green + 0.217 * blue) + 0.5; } static void ConvertRGBToYIQ(const double red, const double green, const double blue, double *Y, double *I, double *Q) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *I = QuantumScale * (0.595716 * red - 0.274453 * green - 0.321263 * blue) + 0.5; *Q = QuantumScale * (0.211456 * red - 0.522591 * green + 0.311135 * blue) + 0.5; } static void ConvertRGBToYPbPr(const double red, const double green, const double blue, double *Y, double *Pb, double *Pr) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *Pb = QuantumScale * ((-0.1687367) * red - 0.331264 * green + 0.5 * blue) + 0.5; *Pr = QuantumScale * (0.5 * red - 0.418688 * green - 0.081312 * blue) + 0.5; } static void ConvertRGBToYCbCr(const double red, const double green, const double blue, double *Y, double *Cb, double *Cr) { ConvertRGBToYPbPr(red, green, blue, Y, Cb, Cr); } static void ConvertRGBToYUV(const double red, const double green, const double blue, double *Y, double *U, double *V) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *U = QuantumScale * ((-0.147) * red - 0.289 * green + 0.436 * blue) + 0.5; *V = QuantumScale * (0.615 * red - 0.515 * green - 0.100 * blue) + 0.5; } static MagickBooleanType sRGBTransformImage(Image * image, const ColorspaceType colorspace, ExceptionInfo * exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket * x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) 
(void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status = MagickTrue; progress = 0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* * Convert RGB to CMYK colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->type = image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LinearGRAYColorspace: case GRAYColorspace: { /* * Transform image from sRGB to GRAY. */ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGray(image, ClampToQuantum(GetPixelIntensity(image, q)), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); image->type = GrayscaleType; return (status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* * Transform image from sRGB to target colorspace. 
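 * Each channel is read as a double, handed to the matching
 * ConvertRGBTo...() helper to produce X,Y,Z in [0,1], and then scaled by
 * QuantumRange on the way back into the pixel.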
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red = (double)GetPixelRed(image, q); green = (double)GetPixelGreen(image, q); blue = (double)GetPixelBlue(image, q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red, green, blue, &X, &Y, &Z); break; } case HCLColorspace: { ConvertRGBToHCL(red, green, blue, &X, &Y, &Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red, green, blue, &X, &Y, &Z); break; } case HSBColorspace: { ConvertRGBToHSB(red, green, blue, &X, &Y, &Z); break; } case HSIColorspace: { ConvertRGBToHSI(red, green, blue, &X, &Y, &Z); break; } case HSLColorspace: { ConvertRGBToHSL(red, green, blue, &X, &Y, &Z); break; } case HSVColorspace: { ConvertRGBToHSV(red, green, blue, &X, &Y, &Z); break; } case HWBColorspace: { ConvertRGBToHWB(red, green, blue, &X, &Y, &Z); break; } case LabColorspace: { ConvertRGBToLab(red, green, blue, &X, &Y, &Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red, green, blue, &X, &Y, &Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red, green, blue, &X, &Y, &Z); break; } case LMSColorspace: { ConvertRGBToLMS(red, green, blue, &X, &Y, &Z); break; } case LuvColorspace: { ConvertRGBToLuv(red, green, blue, &X, &Y, &Z); break; } case xyYColorspace: { ConvertRGBToxyY(red, green, blue, &X, &Y, &Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red, green, blue, &X, &Y, &Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red, green, blue, &X, &Y, &Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red, green, blue, &X, &Y, &Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red, green, blue, &X, &Y, &Z); break; } case YUVColorspace: { ConvertRGBToYUV(red, green, blue, &X, &Y, &Z); break; } default: { X = QuantumScale * red; Y = QuantumScale * green; Z = QuantumScale * blue; break; } } SetPixelRed(image, ClampToQuantum(QuantumRange * X), q); SetPixelGreen(image, ClampToQuantum(QuantumRange * Y), q); SetPixelBlue(image, ClampToQuantum(QuantumRange * Z), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum * logmap; /* * Transform RGB to Log colorspace. 
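 * The forward table inverts the decode mapping: logmap[i] stores
 * MaxMap*(reference_white+log10(black+(i/MaxMap)*(1.0-black))/
 * ((gamma/density)*0.002/film_gamma))/1024.0, i.e. linear light encoded
 * as 10-bit Cineon-style printing density.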
*/ density = DisplayGamma; gamma = DisplayGamma; value = GetImageProperty(image, "gamma", exception); if (value != (const char *)NULL) gamma = PerceptibleReciprocal(StringToDouble(value, (char **)NULL)); film_gamma = FilmGamma; value = GetImageProperty(image, "film-gamma", exception); if (value != (const char *)NULL) film_gamma = StringToDouble(value, (char **)NULL); reference_black = ReferenceBlack; value = GetImageProperty(image, "reference-black", exception); if (value != (const char *)NULL) reference_black = StringToDouble(value, (char **)NULL); reference_white = ReferenceWhite; value = GetImageProperty(image, "reference-white", exception); if (value != (const char *)NULL) reference_white = StringToDouble(value, (char **)NULL); logmap = (Quantum *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); black = pow(10.0, (reference_black - reference_white) * (gamma / density) * 0.002 / film_gamma); for (i = 0; i <= (ssize_t) MaxMap; i++) logmap[i] = ScaleMapToQuantum((double)(MaxMap * (reference_white + log10(black + (1.0 * i / MaxMap) * (1.0 - black)) / ((gamma / density) * 0.002 / film_gamma)) / 1024.0)); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { double blue, green, red; red = (double)DecodePixelGamma((MagickRealType) GetPixelRed(image, q)); green = (double)DecodePixelGamma((MagickRealType) GetPixelGreen(image, q)); blue = (double)DecodePixelGamma((MagickRealType) GetPixelBlue(image, q)); SetPixelRed(image, logmap[ScaleQuantumToMap(ClampToQuantum(red))], q); SetPixelGreen(image, logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image, logmap[ScaleQuantumToMap(ClampToQuantum(blue))], q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); logmap = (Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case RGBColorspace: case scRGBColorspace: { /* * Transform image from sRGB to linear RGB. 
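 * DecodePixelGamma() removes the sRGB encoding curve from each channel in
 * place; as in the inverse direction, no lookup table is needed here.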
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red; red = DecodePixelGamma((MagickRealType) GetPixelRed(image, q)); green = DecodePixelGamma((MagickRealType) GetPixelGreen(image, q)); blue = DecodePixelGamma((MagickRealType) GetPixelBlue(image, q)); SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } default: break; } /* * Allocate the tables. */ x_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*x_map)); y_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*y_map)); z_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map = (TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map = (TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map = (TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(&primary_info, 0, sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* * Initialize OHTA tables: * * I1 = 0.33333*R+0.33334*G+0.33333*B I2 = * 0.50000*R+0.00000*G-0.50000*B I3 * =-0.25000*R+0.50000*G-0.25000*B * * I and Q, normally -0.5 through 0.5, are normalized to the range 0 * through QuantumRange. */ primary_info.y = (double)(MaxMap + 1.0) / 2.0; primary_info.z = (double)(MaxMap + 1.0) / 2.0; for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (0.33333 * (double)i); y_map[i].x = (MagickRealType) (0.33334 * (double)i); z_map[i].x = (MagickRealType) (0.33333 * (double)i); x_map[i].y = (MagickRealType) (0.50000 * (double)i); y_map[i].y = (MagickRealType) (0.00000 * (double)i); z_map[i].y = (MagickRealType) (-0.50000 * (double)i); x_map[i].z = (MagickRealType) (-0.25000 * (double)i); y_map[i].z = (MagickRealType) (0.50000 * (double)i); z_map[i].z = (MagickRealType) (-0.25000 * (double)i); } break; } case Rec601YCbCrColorspace: { /* * Initialize YCbCr tables (ITU-R BT.601): * * Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= * -0.1687367*R-0.3312640*G+0.5000000*B Cr= * 0.5000000*R-0.4186880*G-0.0813120*B * * Cb and Cr, normally -0.5 through 0.5, are normalized to the range * 0 through QuantumRange. 
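*
* Worked example with the matrix above: a saturated red pixel
* (R,G,B)=(QuantumRange,0,0) yields Y=0.298839*QuantumRange,
* Cb=(0.5-0.1687367)*QuantumRange once the +0.5 offset carried by
* primary_info is added, and Cr=(0.5+0.5)*QuantumRange, i.e. Cr pegs at
* the top of the range.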
*/ primary_info.y = (double)(MaxMap + 1.0) / 2.0; primary_info.z = (double)(MaxMap + 1.0) / 2.0; for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (0.298839 * (double)i); y_map[i].x = (MagickRealType) (0.586811 * (double)i); z_map[i].x = (MagickRealType) (0.114350 * (double)i); x_map[i].y = (MagickRealType) (-0.1687367 * (double)i); y_map[i].y = (MagickRealType) (-0.331264 * (double)i); z_map[i].y = (MagickRealType) (0.500000 * (double)i); x_map[i].z = (MagickRealType) (0.500000 * (double)i); y_map[i].z = (MagickRealType) (-0.418688 * (double)i); z_map[i].z = (MagickRealType) (-0.081312 * (double)i); } break; } case Rec709YCbCrColorspace: { /* * Initialize YCbCr tables (ITU-R BT.709): * * Y = 0.212656*R+0.715158*G+0.072186*B Cb= * -0.114572*R-0.385428*G+0.500000*B Cr= * 0.500000*R-0.454153*G-0.045847*B * * Cb and Cr, normally -0.5 through 0.5, are normalized to the range * 0 through QuantumRange. */ primary_info.y = (double)(MaxMap + 1.0) / 2.0; primary_info.z = (double)(MaxMap + 1.0) / 2.0; for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (0.212656 * (double)i); y_map[i].x = (MagickRealType) (0.715158 * (double)i); z_map[i].x = (MagickRealType) (0.072186 * (double)i); x_map[i].y = (MagickRealType) (-0.114572 * (double)i); y_map[i].y = (MagickRealType) (-0.385428 * (double)i); z_map[i].y = (MagickRealType) (0.500000 * (double)i); x_map[i].z = (MagickRealType) (0.500000 * (double)i); y_map[i].z = (MagickRealType) (-0.454153 * (double)i); z_map[i].z = (MagickRealType) (-0.045847 * (double)i); } break; } case YCCColorspace: { /* * Initialize YCC tables: * * Y = 0.298839*R+0.586811*G+0.114350*B C1= * -0.298839*R-0.586811*G+0.88600*B C2= * 0.70100*R-0.586811*G-0.114350*B * * YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y = (double)ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z = (double)ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i = 0; i <= (ssize_t) (0.018 * MaxMap); i++) { x_map[i].x = 0.005382 * i; y_map[i].x = 0.010566 * i; z_map[i].x = 0.002052 * i; x_map[i].y = (-0.003296) * i; y_map[i].y = (-0.006471) * i; z_map[i].y = 0.009768 * i; x_map[i].z = 0.009410 * i; y_map[i].z = (-0.007880) * i; z_map[i].z = (-0.001530) * i; } for (; i <= (ssize_t) MaxMap; i++) { x_map[i].x = 0.298839 * (1.099 * i - 0.099); y_map[i].x = 0.586811 * (1.099 * i - 0.099); z_map[i].x = 0.114350 * (1.099 * i - 0.099); x_map[i].y = (-0.298839) * (1.099 * i - 0.099); y_map[i].y = (-0.586811) * (1.099 * i - 0.099); z_map[i].y = 0.88600 * (1.099 * i - 0.099); x_map[i].z = 0.70100 * (1.099 * i - 0.099); y_map[i].z = (-0.586811) * (1.099 * i - 0.099); z_map[i].z = (-0.114350) * (1.099 * i - 0.099); } break; } default: { /* * Linear conversion tables. */ for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * (double)i); y_map[i].x = (MagickRealType) 0.0; z_map[i].x = (MagickRealType) 0.0; x_map[i].y = (MagickRealType) 0.0; y_map[i].y = (MagickRealType) (1.0 * (double)i); z_map[i].y = (MagickRealType) 0.0; x_map[i].z = (MagickRealType) 0.0; y_map[i].z = (MagickRealType) 0.0; z_map[i].z = (MagickRealType) (1.0 * (double)i); } break; } } /* * Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* * Convert DirectClass image. 
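*
* The loop below evaluates the 3x3 colorspace matrix as three table
* lookups per channel: out = x_map[r] + y_map[g] + z_map[b] +
* primary_info, one TransformPacket component per output channel, so no
* per-pixel multiplies are needed.  As a sanity check, with the Rec601
* tables a neutral input r=g=b=m gives
* Y=(0.298839+0.586811+0.114350)*m = m, so grays are preserved.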
*/ image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum * magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { red = ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image, q))); green = ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image, q))); blue = ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image, q))); pixel.red = (x_map[red].x + y_map[green].x + z_map[blue].x) + primary_info.x; pixel.green = (x_map[red].y + y_map[green].y + z_map[blue].y) + primary_info.y; pixel.blue = (x_map[red].z + y_map[green].z + z_map[blue].z) + primary_info.z; SetPixelRed(image, ScaleMapToQuantum(pixel.red), q); SetPixelGreen(image, ScaleMapToQuantum(pixel.green), q); SetPixelBlue(image, ScaleMapToQuantum(pixel.blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, sRGBTransformImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* * Convert PseudoClass image. */ for (i = 0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red = x_map[red].x + y_map[green].x + z_map[blue].x + primary_info.x; pixel.green = x_map[red].y + y_map[green].y + z_map[blue].y + primary_info.y; pixel.blue = x_map[red].z + y_map[green].z + z_map[blue].z + primary_info.z; image->colormap[i].red = (double)ScaleMapToQuantum(pixel.red); image->colormap[i].green = (double)ScaleMapToQuantum(pixel.green); image->colormap[i].blue = (double)ScaleMapToQuantum(pixel.blue); } (void)SyncImage(image, exception); break; } } /* * Relinquish resources. */ z_map = (TransformPacket *) RelinquishMagickMemory(z_map); y_map = (TransformPacket *) RelinquishMagickMemory(y_map); x_map = (TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e C o l o r s p a c e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageColorspace() sets the colorspace member of the Image * structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const * ColorspaceType colorspace,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o colorspace: * the colorspace. % % o exception: return any errors or warnings in this * structure.
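%
%  Example (a typical call):
%
%    (void) SetImageColorspace(image,GRAYColorspace,exception);
%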
% */ MagickExport MagickBooleanType SetImageColorspace(Image * image, const ColorspaceType colorspace, ExceptionInfo * exception) { ImageType type; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->colorspace == colorspace) return (MagickTrue); image->colorspace = colorspace; image->rendering_intent = UndefinedIntent; image->gamma = 1.000 / 2.200; (void)memset(&image->chromaticity, 0, sizeof(image->chromaticity)); type = image->type; if (IsGrayColorspace(colorspace) != MagickFalse) { if (colorspace == LinearGRAYColorspace) image->gamma = 1.000; type = GrayscaleType; } else if ((IsRGBColorspace(colorspace) != MagickFalse) || (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) image->gamma = 1.000; else { image->rendering_intent = PerceptualIntent; image->chromaticity.red_primary.x = 0.6400; image->chromaticity.red_primary.y = 0.3300; image->chromaticity.red_primary.z = 0.0300; image->chromaticity.green_primary.x = 0.3000; image->chromaticity.green_primary.y = 0.6000; image->chromaticity.green_primary.z = 0.1000; image->chromaticity.blue_primary.x = 0.1500; image->chromaticity.blue_primary.y = 0.0600; image->chromaticity.blue_primary.z = 0.7900; image->chromaticity.white_point.x = 0.3127; image->chromaticity.white_point.y = 0.3290; image->chromaticity.white_point.z = 0.3583; } status = SyncImagePixelCache(image, exception); image->type = type; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e G r a y * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageGray() returns MagickTrue if all the pixels in the image * have the % same red, green, and blue intensities and changes the type of * the image to % bi-level or grayscale. % % The format of the SetImageGray * method is: % % MagickBooleanType SetImageGray(const Image *image, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. 
% */ MagickExport MagickBooleanType SetImageGray(Image * image, ExceptionInfo * exception) { const char *value; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (IsImageGray(image)) return (MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return (MagickFalse); value = GetImageProperty(image, "colorspace:auto-grayscale", exception); if (IsStringFalse(value) != MagickFalse) return (MagickFalse); type = IdentifyImageGray(image, exception); if (type == UndefinedType) return (MagickFalse); image->colorspace = GRAYColorspace; if (SyncImagePixelCache((Image *) image, exception) == MagickFalse) return (MagickFalse); image->type = type; return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e M o n o c h r o m e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageMonochrome() returns MagickTrue if all the pixels in the * image have % the same red, green, and blue intensities and the intensity * is either % 0 or QuantumRange and changes the type of the image to * bi-level. % % The format of the SetImageMonochrome method is: % % * MagickBooleanType SetImageMonochrome(Image *image, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o exception: return any errors or warnings in this * structure. % */ MagickExport MagickBooleanType SetImageMonochrome(Image * image, ExceptionInfo * exception) { const char *value; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->type == BilevelType) return (MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return (MagickFalse); value = GetImageProperty(image, "colorspace:auto-grayscale", exception); if (IsStringFalse(value) != MagickFalse) return (MagickFalse); if (IdentifyImageMonochrome(image, exception) == MagickFalse) return (MagickFalse); image->colorspace = GRAYColorspace; if (SyncImagePixelCache((Image *) image, exception) == MagickFalse) return (MagickFalse); image->type = BilevelType; return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % T r a n s f o r m I m a g e C o l o r s p a c e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TransformImageColorspace() transforms an image colorspace, * changing the % image data to reflect the new colorspace. % % The format * of the TransformImageColorspace method is: % % MagickBooleanType * TransformImageColorspace(Image *image, % const ColorspaceType * colorspace,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o colorspace: the colorspace. % * % o exception: return any errors or warnings in this structure. 
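%
%  Example (a typical call):
%
%    (void) TransformImageColorspace(image,CMYKColorspace,exception);
%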
% */ MagickExport MagickBooleanType TransformImageColorspace(Image * image, const ColorspaceType colorspace, ExceptionInfo * exception) { MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->colorspace == colorspace) return (SetImageColorspace(image, colorspace, exception)); (void)DeleteImageProfile(image, "icc"); (void)DeleteImageProfile(image, "icm"); if (colorspace == LinearGRAYColorspace) return (GrayscaleImage(image, Rec709LuminancePixelIntensityMethod, exception)); if (colorspace == GRAYColorspace) return (GrayscaleImage(image, Rec709LumaPixelIntensityMethod, exception)); if (colorspace == UndefinedColorspace) return (SetImageColorspace(image, colorspace, exception)); /* * Convert the reference image from an alternate colorspace to sRGB. */ if (IssRGBColorspace(colorspace) != MagickFalse) return (TransformsRGBImage(image, exception)); status = MagickTrue; if (IssRGBColorspace(image->colorspace) == MagickFalse) status = TransformsRGBImage(image, exception); if (status == MagickFalse) return (status); /* * Convert the reference image from sRGB to an alternate colorspace. */ if (sRGBTransformImage(image, colorspace, exception) == MagickFalse) status = MagickFalse; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + T r a n s f o r m s R G B I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TransformsRGBImage() converts the reference image from an * alternate % colorspace to sRGB. The transformation matrices are not the * standard ones: % the weights are rescaled to normalize the range of the * transformed values % to be [0..QuantumRange]. % % The format of the * TransformsRGBImage method is: % % MagickBooleanType * TransformsRGBImage(Image *image, % ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * exception: return any errors or warnings in this structure. 
% */ static inline void ConvertCMYToRGB(const double cyan, const double magenta, const double yellow, double *red, double *green, double *blue) { *red = QuantumRange * (1.0 - cyan); *green = QuantumRange * (1.0 - magenta); *blue = QuantumRange * (1.0 - yellow); } static inline void ConvertLMSToXYZ(const double L, const double M, const double S, double *X, double *Y, double *Z) { *X = 1.096123820835514 * L - 0.278869000218287 * M + 0.182745179382773 * S; *Y = 0.454369041975359 * L + 0.473533154307412 * M + 0.072097803717229 * S; *Z = (-0.009627608738429) * L - 0.005698031216113 * M + 1.015325639954543 * S; } static inline void ConvertLMSToRGB(const double L, const double M, const double S, double *red, double *green, double *blue) { double X, Y, Z; ConvertLMSToXYZ(L, M, S, &X, &Y, &Z); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static inline void ConvertLuvToRGB(const double L, const double u, const double v, double *red, double *green, double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0 * L, 354.0 * u - 134.0, 262.0 * v - 140.0, &X, &Y, &Z); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return (0); if (value >= 1388.0) return (1388); return ((ssize_t) (value + 0.5)); } static inline void ConvertLabToRGB(const double L, const double a, const double b, double *red, double *green, double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0 * L, 255.0 * (a - 0.5), 255.0 * (b - 0.5), &X, &Y, &Z); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static inline void ConvertxyYToRGB(const double low_x, const double low_y, const double cap_Y, double *red, double *green, double *blue) { double gamma, X, Y, Z; gamma = PerceptibleReciprocal(low_y); X = gamma * cap_Y * low_x; Y = cap_Y; Z = gamma * cap_Y * (1.0 - low_x - low_y); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static void ConvertYPbPrToRGB(const double Y, const double Pb, const double Pr, double *red, double *green, double *blue) { *red = QuantumRange * (0.99999999999914679361 * Y - 1.2188941887145875e-06 * (Pb - 0.5) + 1.4019995886561440468 * (Pr - 0.5)); *green = QuantumRange * (0.99999975910502514331 * Y - 0.34413567816504303521 * (Pb - 0.5) - 0.71413649331646789076 * (Pr - 0.5)); *blue = QuantumRange * (1.00000124040004623180 * Y + 1.77200006607230409200 * (Pb - 0.5) + 2.1453384174593273e-06 * (Pr - 0.5)); } static void ConvertYCbCrToRGB(const double Y, const double Cb, const double Cr, double *red, double *green, double *blue) { ConvertYPbPrToRGB(Y, Cb, Cr, red, green, blue); } static void ConvertYIQToRGB(const double Y, const double I, const double Q, double *red, double *green, double *blue) { *red = QuantumRange * (Y + 0.9562957197589482261 * (I - 0.5) + 0.6210244164652610754 * (Q - 0.5)); *green = QuantumRange * (Y - 0.2721220993185104464 * (I - 0.5) - 0.6473805968256950427 * (Q - 0.5)); *blue = QuantumRange * (Y - 1.1069890167364901945 * (I - 0.5) + 1.7046149983646481374 * (Q - 0.5)); } static void ConvertYDbDrToRGB(const double Y, const double Db, const double Dr, double *red, double *green, double *blue) { *red = QuantumRange * (Y + 9.2303716147657e-05 * (Db - 0.5) - 0.52591263066186533 * (Dr - 0.5)); *green = QuantumRange * (Y - 0.12913289889050927 * (Db - 0.5) + 0.26789932820759876 * (Dr - 0.5)); *blue = QuantumRange * (Y + 0.66467905997895482 * (Db - 0.5) - 7.9202543533108e-05 * (Dr - 0.5)); } static void ConvertYUVToRGB(const double Y, const double U, const double V, double *red, double *green, double *blue) { *red = QuantumRange * (Y - 
3.945707070708279e-05 * (U - 0.5) + 1.1398279671717170825 * (V - 0.5));
  *green = QuantumRange * (Y - 0.3946101641414141437 * (U - 0.5) -
    0.5805003156565656797 * (V - 0.5));
  *blue = QuantumRange * (Y + 2.0319996843434342537 * (U - 0.5) -
    4.813762626262513e-04 * (V - 0.5));
}

static MagickBooleanType TransformsRGBImage(Image * image,
  ExceptionInfo * exception)
{
#define TransformsRGBImageTag "Transform/Image"

  static const float YCCMap[1389] = {
    0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f,
    0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f,
    0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
    0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f,
    0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f,
    0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
    0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f,
    0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f,
    0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
    0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f,
    0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f,
    0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
    0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f,
    0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f,
    0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
    0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f,
    0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f,
    0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
    0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f,
    0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f,
    0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
    0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f,
    0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f,
    0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
    0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f,
    0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f,
    0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
    0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f,
    0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f,
    0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
    0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f,
    0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f,
    0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
    0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f,
    0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f,
    0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
    0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f,
    0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f,
    0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
    0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f,
    0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f,
    0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
    0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f,
    0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f,
    0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
    0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f,
    0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f,
    0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
    0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f,
    0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f,
    0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
    0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f,
    0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f,
    0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
    0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f,
    0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f,
    0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
    0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f,
    0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f,
    0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
    0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f,
    0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f,
    0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
    0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f,
    0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f,
    0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
    0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f,
    0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f,
    0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
    0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f,
    0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f,
    0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
    0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f,
    0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f,
    0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
    0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f,
    0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f,
    0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
    0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f,
    0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f,
    0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
    0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f,
    0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f,
    0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
    0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f,
    0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f,
    0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
    0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f,
    0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f,
    0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
    0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f,
    0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f,
    0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
    0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f,
    0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f,
    0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
    0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f,
    0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f,
    0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
    0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f,
    0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f,
    0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
    0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f,
    0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f,
    0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
    0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f,
    0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f,
    0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
    0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f,
    0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f,
    0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
    0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f,
    0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f,
    0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
    0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f,
    0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f,
    0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
    0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f,
    0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f,
    0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
    0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f,
    0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f,
    0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
    0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f,
    0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f,
    0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
    0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f,
    0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f,
    0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
    0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f,
    0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f,
    0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
    0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f,
    0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f,
    0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
    0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f,
    0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f,
    0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
    0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f,
    0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f,
    0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
    0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f,
    0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f,
    0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
    0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f,
    0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f,
    0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
    0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f,
    0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f,
    0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
    0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f,
    0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f,
    0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
    0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f,
    0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f,
    0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
    0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f,
    0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f,
    0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
    0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f,
    0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f,
    0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
    0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f,
    0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f,
    0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
    0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f,
    0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f,
    0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
    0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f,
    0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f,
    0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
    0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f,
    0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f,
    0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f
  };

  CacheView * image_view;
  MagickBooleanType status;
  MagickOffsetType progress;
  register ssize_t i;
  ssize_t y;
  TransformPacket * y_map, *x_map, *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  status = MagickTrue;
  progress = 0;
  switch (image->colorspace) {
  case CMYKColorspace: {
      PixelInfo zero;
      /*
       * Transform image from CMYK to sRGB.
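       *
       * In outline (ConvertCMYKToRGB() is the authoritative form): with
       * channels scaled to [0,1], red is roughly (1-cyan)*(1-black), and
       * likewise green/magenta and blue/yellow, so cyan=1,black=0 maps to
       * red=0 while cyan=black=0 maps to red=1.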
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LinearGRAYColorspace: case GRAYColorspace: { /* * Transform linear GRAY to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray = (MagickRealType) GetPixelGray(image, q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray = EncodePixelGamma(gray); SetPixelRed(image, ClampToQuantum(gray), q); SetPixelGreen(image, ClampToQuantum(gray), q); SetPixelBlue(image, ClampToQuantum(gray), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* * Transform image from source colorspace to sRGB. 
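*
* Here the stored red/green/blue channels are reinterpreted as the source
* colorspace triple, scaled to [0,1] by QuantumScale before dispatch.
* For example, an HSL pixel stored as (H,S,L)=(0,1,0.5) converts to fully
* saturated red, (QuantumRange,0,0).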
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X = QuantumScale * GetPixelRed(image, q); Y = QuantumScale * GetPixelGreen(image, q); Z = QuantumScale * GetPixelBlue(image, q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X, Y, Z, &red, &green, &blue); break; } case HCLColorspace: { ConvertHCLToRGB(X, Y, Z, &red, &green, &blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X, Y, Z, &red, &green, &blue); break; } case HSBColorspace: { ConvertHSBToRGB(X, Y, Z, &red, &green, &blue); break; } case HSIColorspace: { ConvertHSIToRGB(X, Y, Z, &red, &green, &blue); break; } case HSLColorspace: { ConvertHSLToRGB(X, Y, Z, &red, &green, &blue); break; } case HSVColorspace: { ConvertHSVToRGB(X, Y, Z, &red, &green, &blue); break; } case HWBColorspace: { ConvertHWBToRGB(X, Y, Z, &red, &green, &blue); break; } case LabColorspace: { ConvertLabToRGB(X, Y, Z, &red, &green, &blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X, Y, Z, &red, &green, &blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X, Y, Z, &red, &green, &blue); break; } case LMSColorspace: { ConvertLMSToRGB(X, Y, Z, &red, &green, &blue); break; } case LuvColorspace: { ConvertLuvToRGB(X, Y, Z, &red, &green, &blue); break; } case xyYColorspace: { ConvertxyYToRGB(X, Y, Z, &red, &green, &blue); break; } case XYZColorspace: { ConvertXYZToRGB(X, Y, Z, &red, &green, &blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X, Y, Z, &red, &green, &blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X, Y, Z, &red, &green, &blue); break; } case YIQColorspace: { ConvertYIQToRGB(X, Y, Z, &red, &green, &blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X, Y, Z, &red, &green, &blue); break; } case YUVColorspace: { ConvertYUVToRGB(X, Y, Z, &red, &green, &blue); break; } default: { red = QuantumRange * X; green = QuantumRange * Y; blue = QuantumRange * Z; break; } } SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum * logmap; /* * Transform Log to sRGB colorspace. 
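*
* The inverse logmap built below has three segments: indices at or below
* reference_black*MaxMap/1024 clamp to 0, indices above
* reference_white*MaxMap/1024 clamp to QuantumRange, and the span in
* between inverts the log encoding via pow(10.0,...); with the default
* 95/685 references, 10-bit code values outside [95,685] are crushed to
* black or clipped to white.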
*/ density = DisplayGamma; gamma = DisplayGamma; value = GetImageProperty(image, "gamma", exception); if (value != (const char *)NULL) gamma = PerceptibleReciprocal(StringToDouble(value, (char **)NULL)); film_gamma = FilmGamma; value = GetImageProperty(image, "film-gamma", exception); if (value != (const char *)NULL) film_gamma = StringToDouble(value, (char **)NULL); reference_black = ReferenceBlack; value = GetImageProperty(image, "reference-black", exception); if (value != (const char *)NULL) reference_black = StringToDouble(value, (char **)NULL); reference_white = ReferenceWhite; value = GetImageProperty(image, "reference-white", exception); if (value != (const char *)NULL) reference_white = StringToDouble(value, (char **)NULL); logmap = (Quantum *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); black = pow(10.0, (reference_black - reference_white) * (gamma / density) * 0.002 / film_gamma); for (i = 0; i <= (ssize_t) (reference_black * MaxMap / 1024.0); i++) logmap[i] = (Quantum) 0; for (; i < (ssize_t) (reference_white * MaxMap / 1024.0); i++) logmap[i] = ClampToQuantum(QuantumRange / (1.0 - black) * (pow(10.0, (1024.0 * i / MaxMap - reference_white) * (gamma / density) * 0.002 / film_gamma) - black)); for (; i <= (ssize_t) MaxMap; i++) logmap[i] = QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { double blue, green, red; red = (double)logmap[ScaleQuantumToMap(GetPixelRed(image, q))]; green = (double)logmap[ScaleQuantumToMap(GetPixelGreen(image, q))]; blue = (double)logmap[ScaleQuantumToMap(GetPixelBlue(image, q))]; SetPixelRed(image, ClampToQuantum(EncodePixelGamma((MagickRealType) red)), q); SetPixelGreen(image, ClampToQuantum(EncodePixelGamma((MagickRealType) green)), q); SetPixelBlue(image, ClampToQuantum(EncodePixelGamma((MagickRealType) blue)), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); logmap = (Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case RGBColorspace: case scRGBColorspace: { /* * Transform linear RGB to sRGB colorspace. 
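*
* EncodePixelGamma() below is the forward sRGB transfer function
* (12.92*v for small v, otherwise 1.055*pow(v,1.0/2.4)-0.055), the
* inverse of the decode used on the sRGBTransformImage() side; linear
* 0.214 encodes back to roughly 0.5.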
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { double blue, green, red; red = EncodePixelGamma((MagickRealType) GetPixelRed(image, q)); green = EncodePixelGamma((MagickRealType) GetPixelGreen(image, q)); blue = EncodePixelGamma((MagickRealType) GetPixelBlue(image, q)); SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } default: break; } /* * Allocate the tables. */ x_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*x_map)); y_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*y_map)); z_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map = (TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map = (TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map = (TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* * Initialize OHTA tables: * * I1 = 0.33333*R+0.33334*G+0.33333*B I2 = * 0.50000*R+0.00000*G-0.50000*B I3 * =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G * = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 * * I and Q, normally -0.5 through 0.5, must be normalized to the * range 0 through QuantumRange. */ for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * (double)i); y_map[i].x = (MagickRealType) (0.5 * 1.00000 * (2.0 * (double)i - MaxMap)); z_map[i].x = (MagickRealType) (-0.5 * 0.66668 * (2.0 * (double)i - MaxMap)); x_map[i].y = (MagickRealType) (1.0 * (double)i); y_map[i].y = (MagickRealType) (0.5 * 0.00000 * (2.0 * (double)i - MaxMap)); z_map[i].y = (MagickRealType) (0.5 * 1.33333 * (2.0 * (double)i - MaxMap)); x_map[i].z = (MagickRealType) (1.0 * (double)i); y_map[i].z = (MagickRealType) (-0.5 * 1.00000 * (2.0 * (double)i - MaxMap)); z_map[i].z = (MagickRealType) (-0.5 * 0.66668 * (2.0 * (double)i - MaxMap)); } break; } case Rec601YCbCrColorspace: { /* * Initialize YCbCr tables: * * R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = * Y+1.772000*Cb * * Cb and Cr, normally -0.5 through 0.5, must be normalized to the * range 0 through QuantumRange. 
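*
* Worked example with the equations above: a neutral code triple
* Y=QuantumRange, Cb=Cr=QuantumRange/2 makes every (2.0*i-MaxMap) chroma
* term vanish, leaving R=G=B=Y, so grays survive the round trip exactly.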
*/ for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = 0.99999999999914679361 * (double)i; y_map[i].x = 0.5 * (-1.2188941887145875e-06) * (2.00 * (double)i - MaxMap); z_map[i].x = 0.5 * 1.4019995886561440468 * (2.00 * (double)i - MaxMap); x_map[i].y = 0.99999975910502514331 * (double)i; y_map[i].y = 0.5 * (-0.34413567816504303521) * (2.00 * (double)i - MaxMap); z_map[i].y = 0.5 * (-0.71413649331646789076) * (2.00 * (double)i - MaxMap); x_map[i].z = 1.00000124040004623180 * (double)i; y_map[i].z = 0.5 * 1.77200006607230409200 * (2.00 * (double)i - MaxMap); z_map[i].z = 0.5 * 2.1453384174593273e-06 * (2.00 * (double)i - MaxMap); } break; } case Rec709YCbCrColorspace: { /* * Initialize YCbCr tables: * * R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = * Y+1.855600*Cb * * Cb and Cr, normally -0.5 through 0.5, must be normalized to the * range 0 through QuantumRange. */ for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * i); y_map[i].x = (MagickRealType) (0.5 * 0.000000 * (2.0 * i - MaxMap)); z_map[i].x = (MagickRealType) (0.5 * 1.574800 * (2.0 * i - MaxMap)); x_map[i].y = (MagickRealType) (1.0 * i); y_map[i].y = (MagickRealType) (0.5 * (-0.187324) * (2.0 * i - MaxMap)); z_map[i].y = (MagickRealType) (0.5 * (-0.468124) * (2.0 * i - MaxMap)); x_map[i].z = (MagickRealType) (1.0 * i); y_map[i].z = (MagickRealType) (0.5 * 1.855600 * (2.0 * i - MaxMap)); z_map[i].z = (MagickRealType) (0.5 * 0.000000 * (2.0 * i - MaxMap)); } break; } case YCCColorspace: { /* * Initialize YCC tables: * * R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = * Y+1.632639*C1 * * YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.3584000 * (double)i); y_map[i].x = (MagickRealType) 0.0000000; z_map[i].x = (MagickRealType) (1.8215000 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y = (MagickRealType) (1.3584000 * (double)i); y_map[i].y = (MagickRealType) (-0.4302726 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y = (MagickRealType) (-0.9271435 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z = (MagickRealType) (1.3584000 * (double)i); y_map[i].z = (MagickRealType) (2.2179000 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z = (MagickRealType) 0.0000000; } break; } default: { /* * Linear conversion tables. */ for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * (double)i); y_map[i].x = (MagickRealType) 0.0; z_map[i].x = (MagickRealType) 0.0; x_map[i].y = (MagickRealType) 0.0; y_map[i].y = (MagickRealType) (1.0 * (double)i); z_map[i].y = (MagickRealType) 0.0; x_map[i].z = (MagickRealType) 0.0; y_map[i].z = (MagickRealType) 0.0; z_map[i].z = (MagickRealType) (1.0 * (double)i); } break; } } /* * Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* * Convert DirectClass image. 
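*
* For YCCColorspace the summed table value is additionally pushed through
* the YCCMap lookup (index 0..1388 via RoundToYCC()) to map the
* 1.3584-scaled code values back into [0,1] before clamping; every other
* colorspace goes straight through ScaleMapToQuantum().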
*/ image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red = ScaleQuantumToMap(GetPixelRed(image, q)); green = ScaleQuantumToMap(GetPixelGreen(image, q)); blue = ScaleQuantumToMap(GetPixelBlue(image, q)); pixel.red = x_map[red].x + y_map[green].x + z_map[blue].x; pixel.green = x_map[red].y + y_map[green].y + z_map[blue].y; pixel.blue = x_map[red].z + y_map[green].z + z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.red / (double)MaxMap)]; pixel.green = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.green / (double)MaxMap)]; pixel.blue = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.blue / (double)MaxMap)]; } else { pixel.red = (MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green = (MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue = (MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image, ClampToQuantum(pixel.red), q); SetPixelGreen(image, ClampToQuantum(pixel.green), q); SetPixelBlue(image, ClampToQuantum(pixel.blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, TransformsRGBImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); break; } case PseudoClass: { /* * Convert PseudoClass image. */ for (i = 0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red = x_map[red].x + y_map[green].x + z_map[blue].x; pixel.green = x_map[red].y + y_map[green].y + z_map[blue].y; pixel.blue = x_map[red].z + y_map[green].z + z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.red / (double)MaxMap)]; pixel.green = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.green / (double)MaxMap)]; pixel.blue = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.blue / (double)MaxMap)]; } else { pixel.red = (MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green = (MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue = (MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red = (double)ClampToQuantum(pixel.red); image->colormap[i].green = (double)ClampToQuantum(pixel.green); image->colormap[i].blue = (double)ClampToQuantum(pixel.blue); } (void)SyncImage(image, exception); break; } } /* * Relinquish resources. */ z_map = (TransformPacket *) RelinquishMagickMemory(z_map); y_map = (TransformPacket *) RelinquishMagickMemory(y_map); x_map = (TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (MagickTrue); }
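/*
  Illustrative sketch (an added example, not part of the library proper):
  a minimal caller that round-trips an image through Lab using the public
  entry point above.  Assumes a MagickCore program that has already
  acquired `image' and `exception'; error handling is abbreviated.
*/
static MagickBooleanType LabRoundTripExample(Image *image,
  ExceptionInfo *exception)
{
  /* Decode to Lab, e.g. to operate on lightness independently of hue. */
  if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
    return(MagickFalse);
  /* ... per-channel processing would go here ... */
  /* Re-encode to sRGB for display or writing. */
  return(TransformImageColorspace(image,sRGBColorspace,exception));
}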
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/property.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* * Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* * Forward declarations. */ static MagickBooleanType TransformsRGBImage(Image *, ExceptionInfo *); /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e C o l o r s p a c e T y p e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageColorspaceType() returns the potential type of image: % * sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To * ensure the image type matches its potential, use SetImageColorspaceType(): * % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), * % exception); % % The format of the GetImageColorspaceType method * is: % % ColorspaceType GetImageColorspaceType(const Image *image, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport ColorspaceType GetImageColorspaceType(const Image * image, ExceptionInfo * exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); colorspace = image->colorspace; type = IdentifyImageType(image, exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleAlphaType)) colorspace = GRAYColorspace; return (colorspace); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % * % + s R G B T r a n s f o r m I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % sRGBTransformImage() converts the reference image from sRGB to * an alternate % colorspace. The transformation matrices are not the * standard ones: the % weights are rescaled to normalize the range of the * transformed values to % be [0..QuantumRange]. % % The format of the * sRGBTransformImage method is: % % MagickBooleanType * sRGBTransformImage(Image *image, % const ColorspaceType * colorspace,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o colorspace: the colorspace to * transform the image to. % % o exception: return any errors or warnings * in this structure. 
% */ static inline void ConvertRGBToCMY(const double red, const double green, const double blue, double *cyan, double *magenta, double *yellow) { *cyan = QuantumScale * (QuantumRange - red); *magenta = QuantumScale * (QuantumRange - green); *yellow = QuantumScale * (QuantumRange - blue); } static inline void ConvertXYZToLMS(const double x, const double y, const double z, double *L, double *M, double *S) { *L = 0.7328 * x + 0.4296 * y - 0.1624 * z; *M = (-0.7036 * x + 1.6975 * y + 0.0061 * z); *S = 0.0030 * x + 0.0136 * y + 0.9834 * z; } static void ConvertRGBToLMS(const double red, const double green, const double blue, double *L, double *M, double *S) { double X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); ConvertXYZToLMS(X, Y, Z, L, M, S); } static void ConvertRGBToLab(const double red, const double green, const double blue, double *L, double *a, double *b) { double X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); ConvertXYZToLab(X, Y, Z, L, a, b); } static void ConvertRGBToLuv(const double red, const double green, const double blue, double *L, double *u, double *v) { double X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); ConvertXYZToLuv(X, Y, Z, L, u, v); } static void ConvertRGBToxyY(const double red, const double green, const double blue, double *low_x, double *low_y, double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); gamma = PerceptibleReciprocal(X + Y + Z); *low_x = gamma * X; *low_y = gamma * Y; *cap_Y = Y; } static void ConvertRGBToYDbDr(const double red, const double green, const double blue, double *Y, double *Db, double *Dr) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *Db = QuantumScale * (-0.450 * red - 0.883 * green + 1.333 * blue) + 0.5; *Dr = QuantumScale * (-1.333 * red + 1.116 * green + 0.217 * blue) + 0.5; } static void ConvertRGBToYIQ(const double red, const double green, const double blue, double *Y, double *I, double *Q) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *I = QuantumScale * (0.595716 * red - 0.274453 * green - 0.321263 * blue) + 0.5; *Q = QuantumScale * (0.211456 * red - 0.522591 * green + 0.311135 * blue) + 0.5; } static void ConvertRGBToYPbPr(const double red, const double green, const double blue, double *Y, double *Pb, double *Pr) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *Pb = QuantumScale * ((-0.1687367) * red - 0.331264 * green + 0.5 * blue) + 0.5; *Pr = QuantumScale * (0.5 * red - 0.418688 * green - 0.081312 * blue) + 0.5; } static void ConvertRGBToYCbCr(const double red, const double green, const double blue, double *Y, double *Cb, double *Cr) { ConvertRGBToYPbPr(red, green, blue, Y, Cb, Cr); } static void ConvertRGBToYUV(const double red, const double green, const double blue, double *Y, double *U, double *V) { *Y = QuantumScale * (0.298839 * red + 0.586811 * green + 0.114350 * blue); *U = QuantumScale * ((-0.147) * red - 0.289 * green + 0.436 * blue) + 0.5; *V = QuantumScale * (0.615 * red - 0.515 * green - 0.100 * blue) + 0.5; } static MagickBooleanType sRGBTransformImage(Image * image, const ColorspaceType colorspace, ExceptionInfo * exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket * x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) 
(void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status = MagickTrue; progress = 0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* * Convert RGB to CMYK colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->type = image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LinearGRAYColorspace: case GRAYColorspace: { /* * Transform image from sRGB to GRAY. */ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGray(image, ClampToQuantum(GetPixelIntensity(image, q)), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); image->type = GrayscaleType; return (status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* * Transform image from sRGB to target colorspace. 
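 *
 * The ConvertRGBTo* helpers below take raw Quantum channel values and
 * return components normalized to [0..1]; each result is then scaled back
 * by QuantumRange and clamped, e.g. (sketch):
 *
 *   ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
 *   SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);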
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red = (double)GetPixelRed(image, q); green = (double)GetPixelGreen(image, q); blue = (double)GetPixelBlue(image, q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red, green, blue, &X, &Y, &Z); break; } case HCLColorspace: { ConvertRGBToHCL(red, green, blue, &X, &Y, &Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red, green, blue, &X, &Y, &Z); break; } case HSBColorspace: { ConvertRGBToHSB(red, green, blue, &X, &Y, &Z); break; } case HSIColorspace: { ConvertRGBToHSI(red, green, blue, &X, &Y, &Z); break; } case HSLColorspace: { ConvertRGBToHSL(red, green, blue, &X, &Y, &Z); break; } case HSVColorspace: { ConvertRGBToHSV(red, green, blue, &X, &Y, &Z); break; } case HWBColorspace: { ConvertRGBToHWB(red, green, blue, &X, &Y, &Z); break; } case LabColorspace: { ConvertRGBToLab(red, green, blue, &X, &Y, &Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red, green, blue, &X, &Y, &Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red, green, blue, &X, &Y, &Z); break; } case LMSColorspace: { ConvertRGBToLMS(red, green, blue, &X, &Y, &Z); break; } case LuvColorspace: { ConvertRGBToLuv(red, green, blue, &X, &Y, &Z); break; } case xyYColorspace: { ConvertRGBToxyY(red, green, blue, &X, &Y, &Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red, green, blue, &X, &Y, &Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red, green, blue, &X, &Y, &Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red, green, blue, &X, &Y, &Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red, green, blue, &X, &Y, &Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red, green, blue, &X, &Y, &Z); break; } case YUVColorspace: { ConvertRGBToYUV(red, green, blue, &X, &Y, &Z); break; } default: { X = QuantumScale * red; Y = QuantumScale * green; Z = QuantumScale * blue; break; } } SetPixelRed(image, ClampToQuantum(QuantumRange * X), q); SetPixelGreen(image, ClampToQuantum(QuantumRange * Y), q); SetPixelBlue(image, ClampToQuantum(QuantumRange * Z), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum * logmap; /* * Transform RGB to Log colorspace. 
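 *
 * The lookup table built below follows the Cineon-style log encoding
 * (sketch, with black = the offset computed from the reference points):
 *
 *   code(i) = reference_white
 *           + log10(black + (i/MaxMap)*(1.0 - black))
 *             / ((gamma/density)*0.002/film_gamma)
 *
 * mapped from the 10-bit [0..1023] printing-density range back into the
 * quantum range via ScaleMapToQuantum().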
*/ density = DisplayGamma; gamma = DisplayGamma; value = GetImageProperty(image, "gamma", exception); if (value != (const char *)NULL) gamma = PerceptibleReciprocal(StringToDouble(value, (char **)NULL)); film_gamma = FilmGamma; value = GetImageProperty(image, "film-gamma", exception); if (value != (const char *)NULL) film_gamma = StringToDouble(value, (char **)NULL); reference_black = ReferenceBlack; value = GetImageProperty(image, "reference-black", exception); if (value != (const char *)NULL) reference_black = StringToDouble(value, (char **)NULL); reference_white = ReferenceWhite; value = GetImageProperty(image, "reference-white", exception); if (value != (const char *)NULL) reference_white = StringToDouble(value, (char **)NULL); logmap = (Quantum *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); black = pow(10.0, (reference_black - reference_white) * (gamma / density) * 0.002 / film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) logmap[i] = ScaleMapToQuantum((double)(MaxMap * (reference_white + log10(black + (1.0 * i / MaxMap) * (1.0 - black)) / ((gamma / density) * 0.002 / film_gamma)) / 1024.0)); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { double blue, green, red; red = (double)DecodePixelGamma((MagickRealType) GetPixelRed(image, q)); green = (double)DecodePixelGamma((MagickRealType) GetPixelGreen(image, q)); blue = (double)DecodePixelGamma((MagickRealType) GetPixelBlue(image, q)); SetPixelRed(image, logmap[ScaleQuantumToMap(ClampToQuantum(red))], q); SetPixelGreen(image, logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image, logmap[ScaleQuantumToMap(ClampToQuantum(blue))], q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); logmap = (Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case RGBColorspace: case scRGBColorspace: { /* * Transform image from sRGB to linear RGB. 
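 *
 * DecodePixelGamma() applies the sRGB electro-optical transfer function;
 * for a channel value s normalized to [0..1] this is, approximately:
 *
 *   linear = s/12.92                       if s <= 0.04045
 *   linear = pow((s + 0.055)/1.055, 2.4)   otherwise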
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red; red = DecodePixelGamma((MagickRealType) GetPixelRed(image, q)); green = DecodePixelGamma((MagickRealType) GetPixelGreen(image, q)); blue = DecodePixelGamma((MagickRealType) GetPixelBlue(image, q)); SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } default: break; } /* * Allocate the tables. */ x_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*x_map)); y_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*y_map)); z_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map = (TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map = (TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map = (TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(&primary_info, 0, sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* * Initialize OHTA tables: * * I1 = 0.33333*R+0.33334*G+0.33333*B I2 = * 0.50000*R+0.00000*G-0.50000*B I3 * =-0.25000*R+0.50000*G-0.25000*B * * I and Q, normally -0.5 through 0.5, are normalized to the range 0 * through QuantumRange. 
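 *
 * The normalization is implemented as an additive bias: primary_info.y
 * and primary_info.z are set to (MaxMap+1)/2 and added to the table sums
 * in the per-pixel loop, e.g. I2 = 0.50000*R - 0.50000*B + (MaxMap+1)/2.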
*/ primary_info.y = (double)(MaxMap + 1.0) / 2.0; primary_info.z = (double)(MaxMap + 1.0) / 2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (0.33333 * (double)i); y_map[i].x = (MagickRealType) (0.33334 * (double)i); z_map[i].x = (MagickRealType) (0.33333 * (double)i); x_map[i].y = (MagickRealType) (0.50000 * (double)i); y_map[i].y = (MagickRealType) (0.00000 * (double)i); z_map[i].y = (MagickRealType) (-0.50000 * (double)i); x_map[i].z = (MagickRealType) (-0.25000 * (double)i); y_map[i].z = (MagickRealType) (0.50000 * (double)i); z_map[i].z = (MagickRealType) (-0.25000 * (double)i); } break; } case Rec601YCbCrColorspace: { /* * Initialize YCbCr tables (ITU-R BT.601): * * Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= * -0.1687367*R-0.3312640*G+0.5000000*B Cr= * 0.5000000*R-0.4186880*G-0.0813120*B * * Cb and Cr, normally -0.5 through 0.5, are normalized to the range * 0 through QuantumRange. */ primary_info.y = (double)(MaxMap + 1.0) / 2.0; primary_info.z = (double)(MaxMap + 1.0) / 2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (0.298839 * (double)i); y_map[i].x = (MagickRealType) (0.586811 * (double)i); z_map[i].x = (MagickRealType) (0.114350 * (double)i); x_map[i].y = (MagickRealType) (-0.1687367 * (double)i); y_map[i].y = (MagickRealType) (-0.331264 * (double)i); z_map[i].y = (MagickRealType) (0.500000 * (double)i); x_map[i].z = (MagickRealType) (0.500000 * (double)i); y_map[i].z = (MagickRealType) (-0.418688 * (double)i); z_map[i].z = (MagickRealType) (-0.081312 * (double)i); } break; } case Rec709YCbCrColorspace: { /* * Initialize YCbCr tables (ITU-R BT.709): * * Y = 0.212656*R+0.715158*G+0.072186*B Cb= * -0.114572*R-0.385428*G+0.500000*B Cr= * 0.500000*R-0.454153*G-0.045847*B * * Cb and Cr, normally -0.5 through 0.5, are normalized to the range * 0 through QuantumRange. */ primary_info.y = (double)(MaxMap + 1.0) / 2.0; primary_info.z = (double)(MaxMap + 1.0) / 2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (0.212656 * (double)i); y_map[i].x = (MagickRealType) (0.715158 * (double)i); z_map[i].x = (MagickRealType) (0.072186 * (double)i); x_map[i].y = (MagickRealType) (-0.114572 * (double)i); y_map[i].y = (MagickRealType) (-0.385428 * (double)i); z_map[i].y = (MagickRealType) (0.500000 * (double)i); x_map[i].z = (MagickRealType) (0.500000 * (double)i); y_map[i].z = (MagickRealType) (-0.454153 * (double)i); z_map[i].z = (MagickRealType) (-0.045847 * (double)i); } break; } case YCCColorspace: { /* * Initialize YCC tables: * * Y = 0.298839*R+0.586811*G+0.114350*B C1= * -0.298839*R-0.586811*G+0.88600*B C2= * 0.70100*R-0.586811*G-0.114350*B * * YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
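 *
 * The table is built in two segments, mirroring a BT.709-style transfer
 * curve: a linear ramp for i <= 0.018*MaxMap and 1.099*i - 0.099 above it.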
*/ primary_info.y = (double)ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z = (double)ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i = 0; i <= (ssize_t) (0.018 * MaxMap); i++) { x_map[i].x = 0.005382 * i; y_map[i].x = 0.010566 * i; z_map[i].x = 0.002052 * i; x_map[i].y = (-0.003296) * i; y_map[i].y = (-0.006471) * i; z_map[i].y = 0.009768 * i; x_map[i].z = 0.009410 * i; y_map[i].z = (-0.007880) * i; z_map[i].z = (-0.001530) * i; } for (; i <= (ssize_t) MaxMap; i++) { x_map[i].x = 0.298839 * (1.099 * i - 0.099); y_map[i].x = 0.586811 * (1.099 * i - 0.099); z_map[i].x = 0.114350 * (1.099 * i - 0.099); x_map[i].y = (-0.298839) * (1.099 * i - 0.099); y_map[i].y = (-0.586811) * (1.099 * i - 0.099); z_map[i].y = 0.88600 * (1.099 * i - 0.099); x_map[i].z = 0.70100 * (1.099 * i - 0.099); y_map[i].z = (-0.586811) * (1.099 * i - 0.099); z_map[i].z = (-0.114350) * (1.099 * i - 0.099); } break; } default: { /* * Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * (double)i); y_map[i].x = (MagickRealType) 0.0; z_map[i].x = (MagickRealType) 0.0; x_map[i].y = (MagickRealType) 0.0; y_map[i].y = (MagickRealType) (1.0 * (double)i); z_map[i].y = (MagickRealType) 0.0; x_map[i].z = (MagickRealType) 0.0; y_map[i].z = (MagickRealType) 0.0; z_map[i].z = (MagickRealType) (1.0 * (double)i); } break; } } /* * Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* * Convert DirectClass image. */ image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum * magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { red = ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image, q))); green = ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image, q))); blue = ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image, q))); pixel.red = (x_map[red].x + y_map[green].x + z_map[blue].x) + primary_info.x; pixel.green = (x_map[red].y + y_map[green].y + z_map[blue].y) + primary_info.y; pixel.blue = (x_map[red].z + y_map[green].z + z_map[blue].z) + primary_info.z; SetPixelRed(image, ScaleMapToQuantum(pixel.red), q); SetPixelGreen(image, ScaleMapToQuantum(pixel.green), q); SetPixelBlue(image, ScaleMapToQuantum(pixel.blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_sRGBTransformImage) #endif proceed = SetImageProgress(image, sRGBTransformImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* * Convert PseudoClass image. 
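 *
 * The colormap path applies the same x/y/z_map tables (plus the
 * primary_info bias) once per palette entry instead of once per pixel;
 * SyncImage() then pushes the updated colormap back out to the pixels.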
*/ for (i = 0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red = x_map[red].x + y_map[green].x + z_map[blue].x + primary_info.x; pixel.green = x_map[red].y + y_map[green].y + z_map[blue].y + primary_info.y; pixel.blue = x_map[red].z + y_map[green].z + z_map[blue].z + primary_info.z; image->colormap[i].red = (double)ScaleMapToQuantum(pixel.red); image->colormap[i].green = (double)ScaleMapToQuantum(pixel.green); image->colormap[i].blue = (double)ScaleMapToQuantum(pixel.blue); } (void)SyncImage(image, exception); break; } } /* * Relinquish resources. */ z_map = (TransformPacket *) RelinquishMagickMemory(z_map); y_map = (TransformPacket *) RelinquishMagickMemory(y_map); x_map = (TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image, colorspace, exception) == MagickFalse) return (MagickFalse); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e C o l o r s p a c e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageColorspace() sets the colorspace member of the Image * structure. % % The format of the SetImageColorspace method is: % % * MagickBooleanType SetImageColorspace(Image *image, % const * ColorspaceType colorspace,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o colorspace: * the colorspace. % % o exception: return any errors or warnings in this * structure. % */ MagickExport MagickBooleanType SetImageColorspace(Image * image, const ColorspaceType colorspace, ExceptionInfo * exception) { ImageType type; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->colorspace == colorspace) return (MagickTrue); image->colorspace = colorspace; image->rendering_intent = UndefinedIntent; image->gamma = 1.000 / 2.200; (void)memset(&image->chromaticity, 0, sizeof(image->chromaticity)); type = image->type; if (IsGrayColorspace(colorspace) != MagickFalse) { if (colorspace == LinearGRAYColorspace) image->gamma = 1.000; type = GrayscaleType; } else if ((IsRGBColorspace(colorspace) != MagickFalse) || (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) image->gamma = 1.000; else { image->rendering_intent = PerceptualIntent; image->chromaticity.red_primary.x = 0.6400; image->chromaticity.red_primary.y = 0.3300; image->chromaticity.red_primary.z = 0.0300; image->chromaticity.green_primary.x = 0.3000; image->chromaticity.green_primary.y = 0.6000; image->chromaticity.green_primary.z = 0.1000; image->chromaticity.blue_primary.x = 0.1500; image->chromaticity.blue_primary.y = 0.0600; image->chromaticity.blue_primary.z = 0.7900; image->chromaticity.white_point.x = 0.3127; image->chromaticity.white_point.y = 0.3290; image->chromaticity.white_point.z = 0.3583; } status = SyncImagePixelCache(image, exception); image->type = type; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e G r a y * % % * % % 
* % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageGray() returns MagickTrue if all the pixels in the image * have the % same red, green, and blue intensities and changes the type of * the image to % bi-level or grayscale. % % The format of the SetImageGray * method is: % % MagickBooleanType SetImageGray(const Image *image, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport MagickBooleanType SetImageGray(Image * image, ExceptionInfo * exception) { const char *value; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (IsImageGray(image)) return (MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return (MagickFalse); value = GetImageProperty(image, "colorspace:auto-grayscale", exception); if (IsStringFalse(value) != MagickFalse) return (MagickFalse); type = IdentifyImageGray(image, exception); if (type == UndefinedType) return (MagickFalse); image->colorspace = GRAYColorspace; if (SyncImagePixelCache((Image *) image, exception) == MagickFalse) return (MagickFalse); image->type = type; return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e M o n o c h r o m e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageMonochrome() returns MagickTrue if all the pixels in the * image have % the same red, green, and blue intensities and the intensity * is either % 0 or QuantumRange and changes the type of the image to * bi-level. % % The format of the SetImageMonochrome method is: % % * MagickBooleanType SetImageMonochrome(Image *image, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o exception: return any errors or warnings in this * structure. % */ MagickExport MagickBooleanType SetImageMonochrome(Image * image, ExceptionInfo * exception) { const char *value; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->type == BilevelType) return (MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return (MagickFalse); value = GetImageProperty(image, "colorspace:auto-grayscale", exception); if (IsStringFalse(value) != MagickFalse) return (MagickFalse); if (IdentifyImageMonochrome(image, exception) == MagickFalse) return (MagickFalse); image->colorspace = GRAYColorspace; if (SyncImagePixelCache((Image *) image, exception) == MagickFalse) return (MagickFalse); image->type = BilevelType; return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % T r a n s f o r m I m a g e C o l o r s p a c e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TransformImageColorspace() transforms an image colorspace, * changing the % image data to reflect the new colorspace. 
% % The format * of the TransformImageColorspace method is: % % MagickBooleanType * TransformImageColorspace(Image *image, % const ColorspaceType * colorspace,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o colorspace: the colorspace. % * % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType TransformImageColorspace(Image * image, const ColorspaceType colorspace, ExceptionInfo * exception) { MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->colorspace == colorspace) return (SetImageColorspace(image, colorspace, exception)); (void)DeleteImageProfile(image, "icc"); (void)DeleteImageProfile(image, "icm"); if (colorspace == LinearGRAYColorspace) return (GrayscaleImage(image, Rec709LuminancePixelIntensityMethod, exception)); if (colorspace == GRAYColorspace) return (GrayscaleImage(image, Rec709LumaPixelIntensityMethod, exception)); if (colorspace == UndefinedColorspace) return (SetImageColorspace(image, colorspace, exception)); /* * Convert the reference image from an alternate colorspace to sRGB. */ if (IssRGBColorspace(colorspace) != MagickFalse) return (TransformsRGBImage(image, exception)); status = MagickTrue; if (IssRGBColorspace(image->colorspace) == MagickFalse) status = TransformsRGBImage(image, exception); if (status == MagickFalse) return (status); /* * Convert the reference image from sRGB to an alternate colorspace. */ if (sRGBTransformImage(image, colorspace, exception) == MagickFalse) status = MagickFalse; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + T r a n s f o r m s R G B I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TransformsRGBImage() converts the reference image from an * alternate % colorspace to sRGB. The transformation matrices are not the * standard ones: % the weights are rescaled to normalize the range of the * transformed values % to be [0..QuantumRange]. % % The format of the * TransformsRGBImage method is: % % MagickBooleanType * TransformsRGBImage(Image *image, % ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * exception: return any errors or warnings in this structure. 
% */ static inline void ConvertCMYToRGB(const double cyan, const double magenta, const double yellow, double *red, double *green, double *blue) { *red = QuantumRange * (1.0 - cyan); *green = QuantumRange * (1.0 - magenta); *blue = QuantumRange * (1.0 - yellow); } static inline void ConvertLMSToXYZ(const double L, const double M, const double S, double *X, double *Y, double *Z) { *X = 1.096123820835514 * L - 0.278869000218287 * M + 0.182745179382773 * S; *Y = 0.454369041975359 * L + 0.473533154307412 * M + 0.072097803717229 * S; *Z = (-0.009627608738429) * L - 0.005698031216113 * M + 1.015325639954543 * S; } static inline void ConvertLMSToRGB(const double L, const double M, const double S, double *red, double *green, double *blue) { double X, Y, Z; ConvertLMSToXYZ(L, M, S, &X, &Y, &Z); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static inline void ConvertLuvToRGB(const double L, const double u, const double v, double *red, double *green, double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0 * L, 354.0 * u - 134.0, 262.0 * v - 140.0, &X, &Y, &Z); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return (0); if (value >= 1388.0) return (1388); return ((ssize_t) (value + 0.5)); } static inline void ConvertLabToRGB(const double L, const double a, const double b, double *red, double *green, double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0 * L, 255.0 * (a - 0.5), 255.0 * (b - 0.5), &X, &Y, &Z); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static inline void ConvertxyYToRGB(const double low_x, const double low_y, const double cap_Y, double *red, double *green, double *blue) { double gamma, X, Y, Z; gamma = PerceptibleReciprocal(low_y); X = gamma * cap_Y * low_x; Y = cap_Y; Z = gamma * cap_Y * (1.0 - low_x - low_y); ConvertXYZToRGB(X, Y, Z, red, green, blue); } static void ConvertYPbPrToRGB(const double Y, const double Pb, const double Pr, double *red, double *green, double *blue) { *red = QuantumRange * (0.99999999999914679361 * Y - 1.2188941887145875e-06 * (Pb - 0.5) + 1.4019995886561440468 * (Pr - 0.5)); *green = QuantumRange * (0.99999975910502514331 * Y - 0.34413567816504303521 * (Pb - 0.5) - 0.71413649331646789076 * (Pr - 0.5)); *blue = QuantumRange * (1.00000124040004623180 * Y + 1.77200006607230409200 * (Pb - 0.5) + 2.1453384174593273e-06 * (Pr - 0.5)); } static void ConvertYCbCrToRGB(const double Y, const double Cb, const double Cr, double *red, double *green, double *blue) { ConvertYPbPrToRGB(Y, Cb, Cr, red, green, blue); } static void ConvertYIQToRGB(const double Y, const double I, const double Q, double *red, double *green, double *blue) { *red = QuantumRange * (Y + 0.9562957197589482261 * (I - 0.5) + 0.6210244164652610754 * (Q - 0.5)); *green = QuantumRange * (Y - 0.2721220993185104464 * (I - 0.5) - 0.6473805968256950427 * (Q - 0.5)); *blue = QuantumRange * (Y - 1.1069890167364901945 * (I - 0.5) + 1.7046149983646481374 * (Q - 0.5)); } static void ConvertYDbDrToRGB(const double Y, const double Db, const double Dr, double *red, double *green, double *blue) { *red = QuantumRange * (Y + 9.2303716147657e-05 * (Db - 0.5) - 0.52591263066186533 * (Dr - 0.5)); *green = QuantumRange * (Y - 0.12913289889050927 * (Db - 0.5) + 0.26789932820759876 * (Dr - 0.5)); *blue = QuantumRange * (Y + 0.66467905997895482 * (Db - 0.5) - 7.9202543533108e-05 * (Dr - 0.5)); } static void ConvertYUVToRGB(const double Y, const double U, const double V, double *red, double *green, double *blue) { *red = QuantumRange * (Y - 
3.945707070708279e-05 * (U - 0.5) + 1.1398279671717170825 * (V - 0.5)); *green = QuantumRange * (Y - 0.3946101641414141437 * (U - 0.5) - 0.5805003156565656797 * (V - 0.5)); *blue = QuantumRange * (Y + 2.0319996843434342537 * (U - 0.5) - 4.813762626262513e-04 * (V - 0.5)); } static MagickBooleanType TransformsRGBImage(Image * image, ExceptionInfo * exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000 f, 0.000720 f, 0.001441 f, 0.002161 f, 0.002882 f, 0.003602 f, 0.004323 f, 0.005043 f, 0.005764 f, 0.006484 f, 0.007205 f, 0.007925 f, 0.008646 f, 0.009366 f, 0.010086 f, 0.010807 f, 0.011527 f, 0.012248 f, 0.012968 f, 0.013689 f, 0.014409 f, 0.015130 f, 0.015850 f, 0.016571 f, 0.017291 f, 0.018012 f, 0.018732 f, 0.019452 f, 0.020173 f, 0.020893 f, 0.021614 f, 0.022334 f, 0.023055 f, 0.023775 f, 0.024496 f, 0.025216 f, 0.025937 f, 0.026657 f, 0.027378 f, 0.028098 f, 0.028818 f, 0.029539 f, 0.030259 f, 0.030980 f, 0.031700 f, 0.032421 f, 0.033141 f, 0.033862 f, 0.034582 f, 0.035303 f, 0.036023 f, 0.036744 f, 0.037464 f, 0.038184 f, 0.038905 f, 0.039625 f, 0.040346 f, 0.041066 f, 0.041787 f, 0.042507 f, 0.043228 f, 0.043948 f, 0.044669 f, 0.045389 f, 0.046110 f, 0.046830 f, 0.047550 f, 0.048271 f, 0.048991 f, 0.049712 f, 0.050432 f, 0.051153 f, 0.051873 f, 0.052594 f, 0.053314 f, 0.054035 f, 0.054755 f, 0.055476 f, 0.056196 f, 0.056916 f, 0.057637 f, 0.058357 f, 0.059078 f, 0.059798 f, 0.060519 f, 0.061239 f, 0.061960 f, 0.062680 f, 0.063401 f, 0.064121 f, 0.064842 f, 0.065562 f, 0.066282 f, 0.067003 f, 0.067723 f, 0.068444 f, 0.069164 f, 0.069885 f, 0.070605 f, 0.071326 f, 0.072046 f, 0.072767 f, 0.073487 f, 0.074207 f, 0.074928 f, 0.075648 f, 0.076369 f, 0.077089 f, 0.077810 f, 0.078530 f, 0.079251 f, 0.079971 f, 0.080692 f, 0.081412 f, 0.082133 f, 0.082853 f, 0.083573 f, 0.084294 f, 0.085014 f, 0.085735 f, 0.086455 f, 0.087176 f, 0.087896 f, 0.088617 f, 0.089337 f, 0.090058 f, 0.090778 f, 0.091499 f, 0.092219 f, 0.092939 f, 0.093660 f, 0.094380 f, 0.095101 f, 0.095821 f, 0.096542 f, 0.097262 f, 0.097983 f, 0.098703 f, 0.099424 f, 0.100144 f, 0.100865 f, 0.101585 f, 0.102305 f, 0.103026 f, 0.103746 f, 0.104467 f, 0.105187 f, 0.105908 f, 0.106628 f, 0.107349 f, 0.108069 f, 0.108790 f, 0.109510 f, 0.110231 f, 0.110951 f, 0.111671 f, 0.112392 f, 0.113112 f, 0.113833 f, 0.114553 f, 0.115274 f, 0.115994 f, 0.116715 f, 0.117435 f, 0.118156 f, 0.118876 f, 0.119597 f, 0.120317 f, 0.121037 f, 0.121758 f, 0.122478 f, 0.123199 f, 0.123919 f, 0.124640 f, 0.125360 f, 0.126081 f, 0.126801 f, 0.127522 f, 0.128242 f, 0.128963 f, 0.129683 f, 0.130403 f, 0.131124 f, 0.131844 f, 0.132565 f, 0.133285 f, 0.134006 f, 0.134726 f, 0.135447 f, 0.136167 f, 0.136888 f, 0.137608 f, 0.138329 f, 0.139049 f, 0.139769 f, 0.140490 f, 0.141210 f, 0.141931 f, 0.142651 f, 0.143372 f, 0.144092 f, 0.144813 f, 0.145533 f, 0.146254 f, 0.146974 f, 0.147695 f, 0.148415 f, 0.149135 f, 0.149856 f, 0.150576 f, 0.151297 f, 0.152017 f, 0.152738 f, 0.153458 f, 0.154179 f, 0.154899 f, 0.155620 f, 0.156340 f, 0.157061 f, 0.157781 f, 0.158501 f, 0.159222 f, 0.159942 f, 0.160663 f, 0.161383 f, 0.162104 f, 0.162824 f, 0.163545 f, 0.164265 f, 0.164986 f, 0.165706 f, 0.166427 f, 0.167147 f, 0.167867 f, 0.168588 f, 0.169308 f, 0.170029 f, 0.170749 f, 0.171470 f, 0.172190 f, 0.172911 f, 0.173631 f, 0.174352 f, 0.175072 f, 0.175793 f, 0.176513 f, 0.177233 f, 0.177954 f, 0.178674 f, 0.179395 f, 0.180115 f, 0.180836 f, 0.181556 f, 0.182277 f, 0.182997 f, 0.183718 f, 0.184438 f, 0.185159 f, 0.185879 f, 
0.186599 f, 0.187320 f, 0.188040 f, 0.188761 f, 0.189481 f, 0.190202 f, 0.190922 f, 0.191643 f, 0.192363 f, 0.193084 f, 0.193804 f, 0.194524 f, 0.195245 f, 0.195965 f, 0.196686 f, 0.197406 f, 0.198127 f, 0.198847 f, 0.199568 f, 0.200288 f, 0.201009 f, 0.201729 f, 0.202450 f, 0.203170 f, 0.203890 f, 0.204611 f, 0.205331 f, 0.206052 f, 0.206772 f, 0.207493 f, 0.208213 f, 0.208934 f, 0.209654 f, 0.210375 f, 0.211095 f, 0.211816 f, 0.212536 f, 0.213256 f, 0.213977 f, 0.214697 f, 0.215418 f, 0.216138 f, 0.216859 f, 0.217579 f, 0.218300 f, 0.219020 f, 0.219741 f, 0.220461 f, 0.221182 f, 0.221902 f, 0.222622 f, 0.223343 f, 0.224063 f, 0.224784 f, 0.225504 f, 0.226225 f, 0.226945 f, 0.227666 f, 0.228386 f, 0.229107 f, 0.229827 f, 0.230548 f, 0.231268 f, 0.231988 f, 0.232709 f, 0.233429 f, 0.234150 f, 0.234870 f, 0.235591 f, 0.236311 f, 0.237032 f, 0.237752 f, 0.238473 f, 0.239193 f, 0.239914 f, 0.240634 f, 0.241354 f, 0.242075 f, 0.242795 f, 0.243516 f, 0.244236 f, 0.244957 f, 0.245677 f, 0.246398 f, 0.247118 f, 0.247839 f, 0.248559 f, 0.249280 f, 0.250000 f, 0.250720 f, 0.251441 f, 0.252161 f, 0.252882 f, 0.253602 f, 0.254323 f, 0.255043 f, 0.255764 f, 0.256484 f, 0.257205 f, 0.257925 f, 0.258646 f, 0.259366 f, 0.260086 f, 0.260807 f, 0.261527 f, 0.262248 f, 0.262968 f, 0.263689 f, 0.264409 f, 0.265130 f, 0.265850 f, 0.266571 f, 0.267291 f, 0.268012 f, 0.268732 f, 0.269452 f, 0.270173 f, 0.270893 f, 0.271614 f, 0.272334 f, 0.273055 f, 0.273775 f, 0.274496 f, 0.275216 f, 0.275937 f, 0.276657 f, 0.277378 f, 0.278098 f, 0.278818 f, 0.279539 f, 0.280259 f, 0.280980 f, 0.281700 f, 0.282421 f, 0.283141 f, 0.283862 f, 0.284582 f, 0.285303 f, 0.286023 f, 0.286744 f, 0.287464 f, 0.288184 f, 0.288905 f, 0.289625 f, 0.290346 f, 0.291066 f, 0.291787 f, 0.292507 f, 0.293228 f, 0.293948 f, 0.294669 f, 0.295389 f, 0.296109 f, 0.296830 f, 0.297550 f, 0.298271 f, 0.298991 f, 0.299712 f, 0.300432 f, 0.301153 f, 0.301873 f, 0.302594 f, 0.303314 f, 0.304035 f, 0.304755 f, 0.305476 f, 0.306196 f, 0.306916 f, 0.307637 f, 0.308357 f, 0.309078 f, 0.309798 f, 0.310519 f, 0.311239 f, 0.311960 f, 0.312680 f, 0.313401 f, 0.314121 f, 0.314842 f, 0.315562 f, 0.316282 f, 0.317003 f, 0.317723 f, 0.318444 f, 0.319164 f, 0.319885 f, 0.320605 f, 0.321326 f, 0.322046 f, 0.322767 f, 0.323487 f, 0.324207 f, 0.324928 f, 0.325648 f, 0.326369 f, 0.327089 f, 0.327810 f, 0.328530 f, 0.329251 f, 0.329971 f, 0.330692 f, 0.331412 f, 0.332133 f, 0.332853 f, 0.333573 f, 0.334294 f, 0.335014 f, 0.335735 f, 0.336455 f, 0.337176 f, 0.337896 f, 0.338617 f, 0.339337 f, 0.340058 f, 0.340778 f, 0.341499 f, 0.342219 f, 0.342939 f, 0.343660 f, 0.344380 f, 0.345101 f, 0.345821 f, 0.346542 f, 0.347262 f, 0.347983 f, 0.348703 f, 0.349424 f, 0.350144 f, 0.350865 f, 0.351585 f, 0.352305 f, 0.353026 f, 0.353746 f, 0.354467 f, 0.355187 f, 0.355908 f, 0.356628 f, 0.357349 f, 0.358069 f, 0.358790 f, 0.359510 f, 0.360231 f, 0.360951 f, 0.361671 f, 0.362392 f, 0.363112 f, 0.363833 f, 0.364553 f, 0.365274 f, 0.365994 f, 0.366715 f, 0.367435 f, 0.368156 f, 0.368876 f, 0.369597 f, 0.370317 f, 0.371037 f, 0.371758 f, 0.372478 f, 0.373199 f, 0.373919 f, 0.374640 f, 0.375360 f, 0.376081 f, 0.376801 f, 0.377522 f, 0.378242 f, 0.378963 f, 0.379683 f, 0.380403 f, 0.381124 f, 0.381844 f, 0.382565 f, 0.383285 f, 0.384006 f, 0.384726 f, 0.385447 f, 0.386167 f, 0.386888 f, 0.387608 f, 0.388329 f, 0.389049 f, 0.389769 f, 0.390490 f, 0.391210 f, 0.391931 f, 0.392651 f, 0.393372 f, 0.394092 f, 0.394813 f, 0.395533 f, 0.396254 f, 0.396974 f, 0.397695 f, 0.398415 f, 0.399135 f, 
0.399856 f, 0.400576 f, 0.401297 f, 0.402017 f, 0.402738 f, 0.403458 f, 0.404179 f, 0.404899 f, 0.405620 f, 0.406340 f, 0.407061 f, 0.407781 f, 0.408501 f, 0.409222 f, 0.409942 f, 0.410663 f, 0.411383 f, 0.412104 f, 0.412824 f, 0.413545 f, 0.414265 f, 0.414986 f, 0.415706 f, 0.416427 f, 0.417147 f, 0.417867 f, 0.418588 f, 0.419308 f, 0.420029 f, 0.420749 f, 0.421470 f, 0.422190 f, 0.422911 f, 0.423631 f, 0.424352 f, 0.425072 f, 0.425793 f, 0.426513 f, 0.427233 f, 0.427954 f, 0.428674 f, 0.429395 f, 0.430115 f, 0.430836 f, 0.431556 f, 0.432277 f, 0.432997 f, 0.433718 f, 0.434438 f, 0.435158 f, 0.435879 f, 0.436599 f, 0.437320 f, 0.438040 f, 0.438761 f, 0.439481 f, 0.440202 f, 0.440922 f, 0.441643 f, 0.442363 f, 0.443084 f, 0.443804 f, 0.444524 f, 0.445245 f, 0.445965 f, 0.446686 f, 0.447406 f, 0.448127 f, 0.448847 f, 0.449568 f, 0.450288 f, 0.451009 f, 0.451729 f, 0.452450 f, 0.453170 f, 0.453891 f, 0.454611 f, 0.455331 f, 0.456052 f, 0.456772 f, 0.457493 f, 0.458213 f, 0.458934 f, 0.459654 f, 0.460375 f, 0.461095 f, 0.461816 f, 0.462536 f, 0.463256 f, 0.463977 f, 0.464697 f, 0.465418 f, 0.466138 f, 0.466859 f, 0.467579 f, 0.468300 f, 0.469020 f, 0.469741 f, 0.470461 f, 0.471182 f, 0.471902 f, 0.472622 f, 0.473343 f, 0.474063 f, 0.474784 f, 0.475504 f, 0.476225 f, 0.476945 f, 0.477666 f, 0.478386 f, 0.479107 f, 0.479827 f, 0.480548 f, 0.481268 f, 0.481988 f, 0.482709 f, 0.483429 f, 0.484150 f, 0.484870 f, 0.485591 f, 0.486311 f, 0.487032 f, 0.487752 f, 0.488473 f, 0.489193 f, 0.489914 f, 0.490634 f, 0.491354 f, 0.492075 f, 0.492795 f, 0.493516 f, 0.494236 f, 0.494957 f, 0.495677 f, 0.496398 f, 0.497118 f, 0.497839 f, 0.498559 f, 0.499280 f, 0.500000 f, 0.500720 f, 0.501441 f, 0.502161 f, 0.502882 f, 0.503602 f, 0.504323 f, 0.505043 f, 0.505764 f, 0.506484 f, 0.507205 f, 0.507925 f, 0.508646 f, 0.509366 f, 0.510086 f, 0.510807 f, 0.511527 f, 0.512248 f, 0.512968 f, 0.513689 f, 0.514409 f, 0.515130 f, 0.515850 f, 0.516571 f, 0.517291 f, 0.518012 f, 0.518732 f, 0.519452 f, 0.520173 f, 0.520893 f, 0.521614 f, 0.522334 f, 0.523055 f, 0.523775 f, 0.524496 f, 0.525216 f, 0.525937 f, 0.526657 f, 0.527378 f, 0.528098 f, 0.528818 f, 0.529539 f, 0.530259 f, 0.530980 f, 0.531700 f, 0.532421 f, 0.533141 f, 0.533862 f, 0.534582 f, 0.535303 f, 0.536023 f, 0.536744 f, 0.537464 f, 0.538184 f, 0.538905 f, 0.539625 f, 0.540346 f, 0.541066 f, 0.541787 f, 0.542507 f, 0.543228 f, 0.543948 f, 0.544669 f, 0.545389 f, 0.546109 f, 0.546830 f, 0.547550 f, 0.548271 f, 0.548991 f, 0.549712 f, 0.550432 f, 0.551153 f, 0.551873 f, 0.552594 f, 0.553314 f, 0.554035 f, 0.554755 f, 0.555476 f, 0.556196 f, 0.556916 f, 0.557637 f, 0.558357 f, 0.559078 f, 0.559798 f, 0.560519 f, 0.561239 f, 0.561960 f, 0.562680 f, 0.563401 f, 0.564121 f, 0.564842 f, 0.565562 f, 0.566282 f, 0.567003 f, 0.567723 f, 0.568444 f, 0.569164 f, 0.569885 f, 0.570605 f, 0.571326 f, 0.572046 f, 0.572767 f, 0.573487 f, 0.574207 f, 0.574928 f, 0.575648 f, 0.576369 f, 0.577089 f, 0.577810 f, 0.578530 f, 0.579251 f, 0.579971 f, 0.580692 f, 0.581412 f, 0.582133 f, 0.582853 f, 0.583573 f, 0.584294 f, 0.585014 f, 0.585735 f, 0.586455 f, 0.587176 f, 0.587896 f, 0.588617 f, 0.589337 f, 0.590058 f, 0.590778 f, 0.591499 f, 0.592219 f, 0.592939 f, 0.593660 f, 0.594380 f, 0.595101 f, 0.595821 f, 0.596542 f, 0.597262 f, 0.597983 f, 0.598703 f, 0.599424 f, 0.600144 f, 0.600865 f, 0.601585 f, 0.602305 f, 0.603026 f, 0.603746 f, 0.604467 f, 0.605187 f, 0.605908 f, 0.606628 f, 0.607349 f, 0.608069 f, 0.608790 f, 0.609510 f, 0.610231 f, 0.610951 f, 0.611671 f, 0.612392 f, 
0.613112 f, 0.613833 f, 0.614553 f, 0.615274 f, 0.615994 f, 0.616715 f, 0.617435 f, 0.618156 f, 0.618876 f, 0.619597 f, 0.620317 f, 0.621037 f, 0.621758 f, 0.622478 f, 0.623199 f, 0.623919 f, 0.624640 f, 0.625360 f, 0.626081 f, 0.626801 f, 0.627522 f, 0.628242 f, 0.628963 f, 0.629683 f, 0.630403 f, 0.631124 f, 0.631844 f, 0.632565 f, 0.633285 f, 0.634006 f, 0.634726 f, 0.635447 f, 0.636167 f, 0.636888 f, 0.637608 f, 0.638329 f, 0.639049 f, 0.639769 f, 0.640490 f, 0.641210 f, 0.641931 f, 0.642651 f, 0.643372 f, 0.644092 f, 0.644813 f, 0.645533 f, 0.646254 f, 0.646974 f, 0.647695 f, 0.648415 f, 0.649135 f, 0.649856 f, 0.650576 f, 0.651297 f, 0.652017 f, 0.652738 f, 0.653458 f, 0.654179 f, 0.654899 f, 0.655620 f, 0.656340 f, 0.657061 f, 0.657781 f, 0.658501 f, 0.659222 f, 0.659942 f, 0.660663 f, 0.661383 f, 0.662104 f, 0.662824 f, 0.663545 f, 0.664265 f, 0.664986 f, 0.665706 f, 0.666427 f, 0.667147 f, 0.667867 f, 0.668588 f, 0.669308 f, 0.670029 f, 0.670749 f, 0.671470 f, 0.672190 f, 0.672911 f, 0.673631 f, 0.674352 f, 0.675072 f, 0.675793 f, 0.676513 f, 0.677233 f, 0.677954 f, 0.678674 f, 0.679395 f, 0.680115 f, 0.680836 f, 0.681556 f, 0.682277 f, 0.682997 f, 0.683718 f, 0.684438 f, 0.685158 f, 0.685879 f, 0.686599 f, 0.687320 f, 0.688040 f, 0.688761 f, 0.689481 f, 0.690202 f, 0.690922 f, 0.691643 f, 0.692363 f, 0.693084 f, 0.693804 f, 0.694524 f, 0.695245 f, 0.695965 f, 0.696686 f, 0.697406 f, 0.698127 f, 0.698847 f, 0.699568 f, 0.700288 f, 0.701009 f, 0.701729 f, 0.702450 f, 0.703170 f, 0.703891 f, 0.704611 f, 0.705331 f, 0.706052 f, 0.706772 f, 0.707493 f, 0.708213 f, 0.708934 f, 0.709654 f, 0.710375 f, 0.711095 f, 0.711816 f, 0.712536 f, 0.713256 f, 0.713977 f, 0.714697 f, 0.715418 f, 0.716138 f, 0.716859 f, 0.717579 f, 0.718300 f, 0.719020 f, 0.719741 f, 0.720461 f, 0.721182 f, 0.721902 f, 0.722622 f, 0.723343 f, 0.724063 f, 0.724784 f, 0.725504 f, 0.726225 f, 0.726945 f, 0.727666 f, 0.728386 f, 0.729107 f, 0.729827 f, 0.730548 f, 0.731268 f, 0.731988 f, 0.732709 f, 0.733429 f, 0.734150 f, 0.734870 f, 0.735591 f, 0.736311 f, 0.737032 f, 0.737752 f, 0.738473 f, 0.739193 f, 0.739914 f, 0.740634 f, 0.741354 f, 0.742075 f, 0.742795 f, 0.743516 f, 0.744236 f, 0.744957 f, 0.745677 f, 0.746398 f, 0.747118 f, 0.747839 f, 0.748559 f, 0.749280 f, 0.750000 f, 0.750720 f, 0.751441 f, 0.752161 f, 0.752882 f, 0.753602 f, 0.754323 f, 0.755043 f, 0.755764 f, 0.756484 f, 0.757205 f, 0.757925 f, 0.758646 f, 0.759366 f, 0.760086 f, 0.760807 f, 0.761527 f, 0.762248 f, 0.762968 f, 0.763689 f, 0.764409 f, 0.765130 f, 0.765850 f, 0.766571 f, 0.767291 f, 0.768012 f, 0.768732 f, 0.769452 f, 0.770173 f, 0.770893 f, 0.771614 f, 0.772334 f, 0.773055 f, 0.773775 f, 0.774496 f, 0.775216 f, 0.775937 f, 0.776657 f, 0.777378 f, 0.778098 f, 0.778818 f, 0.779539 f, 0.780259 f, 0.780980 f, 0.781700 f, 0.782421 f, 0.783141 f, 0.783862 f, 0.784582 f, 0.785303 f, 0.786023 f, 0.786744 f, 0.787464 f, 0.788184 f, 0.788905 f, 0.789625 f, 0.790346 f, 0.791066 f, 0.791787 f, 0.792507 f, 0.793228 f, 0.793948 f, 0.794669 f, 0.795389 f, 0.796109 f, 0.796830 f, 0.797550 f, 0.798271 f, 0.798991 f, 0.799712 f, 0.800432 f, 0.801153 f, 0.801873 f, 0.802594 f, 0.803314 f, 0.804035 f, 0.804755 f, 0.805476 f, 0.806196 f, 0.806916 f, 0.807637 f, 0.808357 f, 0.809078 f, 0.809798 f, 0.810519 f, 0.811239 f, 0.811960 f, 0.812680 f, 0.813401 f, 0.814121 f, 0.814842 f, 0.815562 f, 0.816282 f, 0.817003 f, 0.817723 f, 0.818444 f, 0.819164 f, 0.819885 f, 0.820605 f, 0.821326 f, 0.822046 f, 0.822767 f, 0.823487 f, 0.824207 f, 0.824928 f, 0.825648 f, 
0.826369 f, 0.827089 f, 0.827810 f, 0.828530 f, 0.829251 f, 0.829971 f, 0.830692 f, 0.831412 f, 0.832133 f, 0.832853 f, 0.833573 f, 0.834294 f, 0.835014 f, 0.835735 f, 0.836455 f, 0.837176 f, 0.837896 f, 0.838617 f, 0.839337 f, 0.840058 f, 0.840778 f, 0.841499 f, 0.842219 f, 0.842939 f, 0.843660 f, 0.844380 f, 0.845101 f, 0.845821 f, 0.846542 f, 0.847262 f, 0.847983 f, 0.848703 f, 0.849424 f, 0.850144 f, 0.850865 f, 0.851585 f, 0.852305 f, 0.853026 f, 0.853746 f, 0.854467 f, 0.855187 f, 0.855908 f, 0.856628 f, 0.857349 f, 0.858069 f, 0.858790 f, 0.859510 f, 0.860231 f, 0.860951 f, 0.861671 f, 0.862392 f, 0.863112 f, 0.863833 f, 0.864553 f, 0.865274 f, 0.865994 f, 0.866715 f, 0.867435 f, 0.868156 f, 0.868876 f, 0.869597 f, 0.870317 f, 0.871037 f, 0.871758 f, 0.872478 f, 0.873199 f, 0.873919 f, 0.874640 f, 0.875360 f, 0.876081 f, 0.876801 f, 0.877522 f, 0.878242 f, 0.878963 f, 0.879683 f, 0.880403 f, 0.881124 f, 0.881844 f, 0.882565 f, 0.883285 f, 0.884006 f, 0.884726 f, 0.885447 f, 0.886167 f, 0.886888 f, 0.887608 f, 0.888329 f, 0.889049 f, 0.889769 f, 0.890490 f, 0.891210 f, 0.891931 f, 0.892651 f, 0.893372 f, 0.894092 f, 0.894813 f, 0.895533 f, 0.896254 f, 0.896974 f, 0.897695 f, 0.898415 f, 0.899135 f, 0.899856 f, 0.900576 f, 0.901297 f, 0.902017 f, 0.902738 f, 0.903458 f, 0.904179 f, 0.904899 f, 0.905620 f, 0.906340 f, 0.907061 f, 0.907781 f, 0.908501 f, 0.909222 f, 0.909942 f, 0.910663 f, 0.911383 f, 0.912104 f, 0.912824 f, 0.913545 f, 0.914265 f, 0.914986 f, 0.915706 f, 0.916427 f, 0.917147 f, 0.917867 f, 0.918588 f, 0.919308 f, 0.920029 f, 0.920749 f, 0.921470 f, 0.922190 f, 0.922911 f, 0.923631 f, 0.924352 f, 0.925072 f, 0.925793 f, 0.926513 f, 0.927233 f, 0.927954 f, 0.928674 f, 0.929395 f, 0.930115 f, 0.930836 f, 0.931556 f, 0.932277 f, 0.932997 f, 0.933718 f, 0.934438 f, 0.935158 f, 0.935879 f, 0.936599 f, 0.937320 f, 0.938040 f, 0.938761 f, 0.939481 f, 0.940202 f, 0.940922 f, 0.941643 f, 0.942363 f, 0.943084 f, 0.943804 f, 0.944524 f, 0.945245 f, 0.945965 f, 0.946686 f, 0.947406 f, 0.948127 f, 0.948847 f, 0.949568 f, 0.950288 f, 0.951009 f, 0.951729 f, 0.952450 f, 0.953170 f, 0.953891 f, 0.954611 f, 0.955331 f, 0.956052 f, 0.956772 f, 0.957493 f, 0.958213 f, 0.958934 f, 0.959654 f, 0.960375 f, 0.961095 f, 0.961816 f, 0.962536 f, 0.963256 f, 0.963977 f, 0.964697 f, 0.965418 f, 0.966138 f, 0.966859 f, 0.967579 f, 0.968300 f, 0.969020 f, 0.969741 f, 0.970461 f, 0.971182 f, 0.971902 f, 0.972622 f, 0.973343 f, 0.974063 f, 0.974784 f, 0.975504 f, 0.976225 f, 0.976945 f, 0.977666 f, 0.978386 f, 0.979107 f, 0.979827 f, 0.980548 f, 0.981268 f, 0.981988 f, 0.982709 f, 0.983429 f, 0.984150 f, 0.984870 f, 0.985591 f, 0.986311 f, 0.987032 f, 0.987752 f, 0.988473 f, 0.989193 f, 0.989914 f, 0.990634 f, 0.991354 f, 0.992075 f, 0.992795 f, 0.993516 f, 0.994236 f, 0.994957 f, 0.995677 f, 0.996398 f, 0.997118 f, 0.997839 f, 0.998559 f, 0.999280 f, 1.000000 f }; CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket * y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = MagickTrue; progress = 0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* * Transform image from CMYK to sRGB. 
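 *
 * Each pixel is gathered into a PixelInfo and handed to
 * ConvertCMYKToRGB(), which folds the black channel back into the CMY
 * components before the result is written out.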
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LinearGRAYColorspace: case GRAYColorspace: { /* * Transform linear GRAY to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray = (MagickRealType) GetPixelGray(image, q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray = EncodePixelGamma(gray); SetPixelRed(image, ClampToQuantum(gray), q); SetPixelGreen(image, ClampToQuantum(gray), q); SetPixelBlue(image, ClampToQuantum(gray), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* * Transform image from source colorspace to sRGB. 
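 *
 * Mirror of the forward path: channels are read as QuantumScale-normalized
 * X,Y,Z triples, and the Convert*ToRGB helpers return values already
 * scaled to the quantum range, so only ClampToQuantum() is applied on the
 * way out.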
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X = QuantumScale * GetPixelRed(image, q); Y = QuantumScale * GetPixelGreen(image, q); Z = QuantumScale * GetPixelBlue(image, q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X, Y, Z, &red, &green, &blue); break; } case HCLColorspace: { ConvertHCLToRGB(X, Y, Z, &red, &green, &blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X, Y, Z, &red, &green, &blue); break; } case HSBColorspace: { ConvertHSBToRGB(X, Y, Z, &red, &green, &blue); break; } case HSIColorspace: { ConvertHSIToRGB(X, Y, Z, &red, &green, &blue); break; } case HSLColorspace: { ConvertHSLToRGB(X, Y, Z, &red, &green, &blue); break; } case HSVColorspace: { ConvertHSVToRGB(X, Y, Z, &red, &green, &blue); break; } case HWBColorspace: { ConvertHWBToRGB(X, Y, Z, &red, &green, &blue); break; } case LabColorspace: { ConvertLabToRGB(X, Y, Z, &red, &green, &blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X, Y, Z, &red, &green, &blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X, Y, Z, &red, &green, &blue); break; } case LMSColorspace: { ConvertLMSToRGB(X, Y, Z, &red, &green, &blue); break; } case LuvColorspace: { ConvertLuvToRGB(X, Y, Z, &red, &green, &blue); break; } case xyYColorspace: { ConvertxyYToRGB(X, Y, Z, &red, &green, &blue); break; } case XYZColorspace: { ConvertXYZToRGB(X, Y, Z, &red, &green, &blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X, Y, Z, &red, &green, &blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X, Y, Z, &red, &green, &blue); break; } case YIQColorspace: { ConvertYIQToRGB(X, Y, Z, &red, &green, &blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X, Y, Z, &red, &green, &blue); break; } case YUVColorspace: { ConvertYUVToRGB(X, Y, Z, &red, &green, &blue); break; } default: { red = QuantumRange * X; green = QuantumRange * Y; blue = QuantumRange * Z; break; } } SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum * logmap; /* * Transform Log to sRGB colorspace. 
*/ density = DisplayGamma; gamma = DisplayGamma; value = GetImageProperty(image, "gamma", exception); if (value != (const char *)NULL) gamma = PerceptibleReciprocal(StringToDouble(value, (char **)NULL)); film_gamma = FilmGamma; value = GetImageProperty(image, "film-gamma", exception); if (value != (const char *)NULL) film_gamma = StringToDouble(value, (char **)NULL); reference_black = ReferenceBlack; value = GetImageProperty(image, "reference-black", exception); if (value != (const char *)NULL) reference_black = StringToDouble(value, (char **)NULL); reference_white = ReferenceWhite; value = GetImageProperty(image, "reference-white", exception); if (value != (const char *)NULL) reference_white = StringToDouble(value, (char **)NULL); logmap = (Quantum *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); black = pow(10.0, (reference_black - reference_white) * (gamma / density) * 0.002 / film_gamma); for (i = 0; i <= (ssize_t) (reference_black * MaxMap / 1024.0); i++) logmap[i] = (Quantum) 0; for (; i < (ssize_t) (reference_white * MaxMap / 1024.0); i++) logmap[i] = ClampToQuantum(QuantumRange / (1.0 - black) * (pow(10.0, (1024.0 * i / MaxMap - reference_white) * (gamma / density) * 0.002 / film_gamma) - black)); for (; i <= (ssize_t) MaxMap; i++) logmap[i] = QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { double blue, green, red; red = (double)logmap[ScaleQuantumToMap(GetPixelRed(image, q))]; green = (double)logmap[ScaleQuantumToMap(GetPixelGreen(image, q))]; blue = (double)logmap[ScaleQuantumToMap(GetPixelBlue(image, q))]; SetPixelRed(image, ClampToQuantum(EncodePixelGamma((MagickRealType) red)), q); SetPixelGreen(image, ClampToQuantum(EncodePixelGamma((MagickRealType) green)), q); SetPixelBlue(image, ClampToQuantum(EncodePixelGamma((MagickRealType) blue)), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); logmap = (Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } case RGBColorspace: case scRGBColorspace: { /* * Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = (ssize_t) image->columns; x != 0; x--) { double blue, green, red; red = EncodePixelGamma((MagickRealType) GetPixelRed(image, q)); green = EncodePixelGamma((MagickRealType) GetPixelGreen(image, q)); blue = EncodePixelGamma((MagickRealType) GetPixelBlue(image, q)); SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (status); } default: break; } /* * Allocate the tables. */ x_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*x_map)); y_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*y_map)); z_map = (TransformPacket *) AcquireQuantumMemory((size_t) MaxMap + 1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map = (TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map = (TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map = (TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* * Initialize OHTA tables: * * I1 = 0.33333*R+0.33334*G+0.33333*B I2 = * 0.50000*R+0.00000*G-0.50000*B I3 * =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G * = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 * * I and Q, normally -0.5 through 0.5, must be normalized to the * range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * (double)i); y_map[i].x = (MagickRealType) (0.5 * 1.00000 * (2.0 * (double)i - MaxMap)); z_map[i].x = (MagickRealType) (-0.5 * 0.66668 * (2.0 * (double)i - MaxMap)); x_map[i].y = (MagickRealType) (1.0 * (double)i); y_map[i].y = (MagickRealType) (0.5 * 0.00000 * (2.0 * (double)i - MaxMap)); z_map[i].y = (MagickRealType) (0.5 * 1.33333 * (2.0 * (double)i - MaxMap)); x_map[i].z = (MagickRealType) (1.0 * (double)i); y_map[i].z = (MagickRealType) (-0.5 * 1.00000 * (2.0 * (double)i - MaxMap)); z_map[i].z = (MagickRealType) (-0.5 * 0.66668 * (2.0 * (double)i - MaxMap)); } break; } case Rec601YCbCrColorspace: { /* * Initialize YCbCr tables: * * R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = * Y+1.772000*Cb * * Cb and Cr, normally -0.5 through 0.5, must be normalized to the * range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = 0.99999999999914679361 * (double)i; y_map[i].x = 0.5 * (-1.2188941887145875e-06) * (2.00 * (double)i - MaxMap); z_map[i].x = 0.5 * 1.4019995886561440468 * (2.00 * (double)i - MaxMap); x_map[i].y = 0.99999975910502514331 * (double)i; y_map[i].y = 0.5 * (-0.34413567816504303521) * (2.00 * (double)i - MaxMap); z_map[i].y = 0.5 * (-0.71413649331646789076) * (2.00 * (double)i - MaxMap); x_map[i].z = 1.00000124040004623180 * (double)i; y_map[i].z = 0.5 * 1.77200006607230409200 * (2.00 * (double)i - MaxMap); z_map[i].z = 0.5 * 2.1453384174593273e-06 * (2.00 * (double)i - MaxMap); } break; } case Rec709YCbCrColorspace: { /* * Initialize YCbCr tables: * * R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = * Y+1.855600*Cb * * Cb and Cr, normally -0.5 through 0.5, must be normalized to the * range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * i); y_map[i].x = (MagickRealType) (0.5 * 0.000000 * (2.0 * i - MaxMap)); z_map[i].x = (MagickRealType) (0.5 * 1.574800 * (2.0 * i - MaxMap)); x_map[i].y = (MagickRealType) (1.0 * i); y_map[i].y = (MagickRealType) (0.5 * (-0.187324) * (2.0 * i - MaxMap)); z_map[i].y = (MagickRealType) (0.5 * (-0.468124) * (2.0 * i - MaxMap)); x_map[i].z = (MagickRealType) (1.0 * i); y_map[i].z = (MagickRealType) (0.5 * 1.855600 * (2.0 * i - MaxMap)); z_map[i].z = (MagickRealType) (0.5 * 0.000000 * (2.0 * i - MaxMap)); } break; } case YCCColorspace: { /* * Initialize YCC tables: * * R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = * Y+1.632639*C1 * * YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.3584000 * (double)i); y_map[i].x = (MagickRealType) 0.0000000; z_map[i].x = (MagickRealType) (1.8215000 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y = (MagickRealType) (1.3584000 * (double)i); y_map[i].y = (MagickRealType) (-0.4302726 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y = (MagickRealType) (-0.9271435 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z = (MagickRealType) (1.3584000 * (double)i); y_map[i].z = (MagickRealType) (2.2179000 * (1.0 * (double)i - (double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z = (MagickRealType) 0.0000000; } break; } default: { /* * Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i = 0; i <= (ssize_t) MaxMap; i++) { x_map[i].x = (MagickRealType) (1.0 * (double)i); y_map[i].x = (MagickRealType) 0.0; z_map[i].x = (MagickRealType) 0.0; x_map[i].y = (MagickRealType) 0.0; y_map[i].y = (MagickRealType) (1.0 * (double)i); z_map[i].y = (MagickRealType) 0.0; x_map[i].z = (MagickRealType) 0.0; y_map[i].z = (MagickRealType) 0.0; z_map[i].z = (MagickRealType) (1.0 * (double)i); } break; } } /* * Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* * Convert DirectClass image. */ image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red = ScaleQuantumToMap(GetPixelRed(image, q)); green = ScaleQuantumToMap(GetPixelGreen(image, q)); blue = ScaleQuantumToMap(GetPixelBlue(image, q)); pixel.red = x_map[red].x + y_map[green].x + z_map[blue].x; pixel.green = x_map[red].y + y_map[green].y + z_map[blue].y; pixel.blue = x_map[red].z + y_map[green].z + z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.red / (double)MaxMap)]; pixel.green = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.green / (double)MaxMap)]; pixel.blue = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.blue / (double)MaxMap)]; } else { pixel.red = (MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green = (MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue = (MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(image, ClampToQuantum(pixel.red), q); SetPixelGreen(image, ClampToQuantum(pixel.green), q); SetPixelBlue(image, ClampToQuantum(pixel.blue), q); q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransformsRGBImage) 
#endif proceed = SetImageProgress(image, TransformsRGBImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); break; } case PseudoClass: { /* * Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i = 0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue = ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red = x_map[red].x + y_map[green].x + z_map[blue].x; pixel.green = x_map[red].y + y_map[green].y + z_map[blue].y; pixel.blue = x_map[red].z + y_map[green].z + z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.red / (double)MaxMap)]; pixel.green = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.green / (double)MaxMap)]; pixel.blue = QuantumRange * YCCMap[RoundToYCC(1024.0 * pixel.blue / (double)MaxMap)]; } else { pixel.red = (MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green = (MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue = (MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red = (double)ClampToQuantum(pixel.red); image->colormap[i].green = (double)ClampToQuantum(pixel.green); image->colormap[i].blue = (double)ClampToQuantum(pixel.blue); } (void)SyncImage(image, exception); break; } } /* * Relinquish resources. */ z_map = (TransformPacket *) RelinquishMagickMemory(z_map); y_map = (TransformPacket *) RelinquishMagickMemory(y_map); x_map = (TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image, sRGBColorspace, exception) == MagickFalse) return (MagickFalse); return (MagickTrue); }
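The DirectClass and PseudoClass paths above both apply a 3x3 linear colorspace transform through three precomputed per-channel tables (x_map, y_map, z_map), so each pixel costs three table reads and six additions instead of nine multiplies. A minimal self-contained sketch of that table trick, with hypothetical names (Packet, build_tables, MAP_MAX) and a toy 8-bit map standing in for ImageMagick's MaxMap and TransformPacket:

#include <stdio.h>

#define MAP_MAX 255 /* toy 8-bit map; ImageMagick's MaxMap is larger */

typedef struct { double x, y, z; } Packet; /* one channel's contribution to R,G,B */

/* Precompute out = M * (r,g,b)^T as three per-channel tables, so the
   per-pixel work is three lookups and six adds instead of nine multiplies. */
static void build_tables(const double M[3][3], Packet xm[], Packet ym[], Packet zm[])
{
  int i;
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
  for (i = 0; i <= MAP_MAX; i++) {
    xm[i].x = M[0][0] * i; ym[i].x = M[0][1] * i; zm[i].x = M[0][2] * i;
    xm[i].y = M[1][0] * i; ym[i].y = M[1][1] * i; zm[i].y = M[1][2] * i;
    xm[i].z = M[2][0] * i; ym[i].z = M[2][1] * i; zm[i].z = M[2][2] * i;
  }
}

int main(void)
{
  static Packet xm[MAP_MAX + 1], ym[MAP_MAX + 1], zm[MAP_MAX + 1];
  const double identity[3][3] = { {1, 0, 0}, {0, 1, 0}, {0, 0, 1} };
  int r = 10, g = 20, b = 30;
  build_tables(identity, xm, ym, zm);
  printf("red out = %g\n", xm[r].x + ym[g].x + zm[b].x); /* prints 10 */
  return 0;
}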
8321.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for private(j) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernel computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj)) { //printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; #pragma scop for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i - 1][j - 1] + 0.5 * A[i - 1][j] + -0.8 * A[i - 1][j + 1] + -0.3 * A[i][j - 1] + 0.6 * A[i][j] + -0.9 * A[i][j + 1] + 0.4 * A[i + 1][j - 1] + 0.7 * A[i + 1][j] + 0.1 * A[i + 1][j + 1]; } } #pragma endscop //printf("Kernel computation complete !!\n"); } int main(int argc, char **argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array(ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj)) { //printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj)) { int i, j; #pragma scop #pragma omp parallel for private(j) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i - 1][j - 1] + 0.5 * A[i - 1][j] + -0.8 * A[i - 1][j + 1] + -0.3 * A[i][j - 1] + 0.6 * A[i][j] + -0.9 * A[i][j + 1] + 0.4 * A[i + 1][j - 1] + 0.7 * A[i + 1][j] + 0.1 * A[i + 1][j + 1]; } } #pragma endscop //printf("Kernel computation complete !!\n"); } int main(int argc, char **argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array(ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d(ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
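A note on the kernel above: a bare "#pragma omp" in front of the inner loop is not a valid directive, and even "#pragma omp for" would be illegal there, since a worksharing construct may not be closely nested inside the worksharing region created by the "parallel for" already placed on the i loop. One outer parallel-for with j privatized covers the whole nest. A minimal standalone sketch of that pattern, using toy 64x64 arrays and plain C in place of the PolyBench macros (the harness names are not assumed):

#include <stdio.h>

#define NI 64
#define NJ 64

static double A[NI][NJ], B[NI][NJ];

int main(void)
{
  int i, j;
  for (i = 0; i < NI; i++)
    for (j = 0; j < NJ; j++)
      A[i][j] = (double)(i + j) / NJ;
  /* Every B[i][j] is independent, so one parallel-for over the rows is
     enough; j must be private because it is declared outside the loop. */
#ifdef _OPENMP
#pragma omp parallel for private(j)
#endif
  for (i = 1; i < NI - 1; ++i)
    for (j = 1; j < NJ - 1; ++j)
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] - 0.8 * A[i-1][j+1]
              - 0.3 * A[i][j-1]   + 0.6 * A[i][j]   - 0.9 * A[i][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
  printf("B[1][1] = %f\n", B[1][1]);
  return 0;
}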
idasFoodWeb_bnd_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * Based on idaFoodWeb_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDAS: Food web problem. * * This example program (OpenMP version) uses the SUNBAND linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDAS using the SUNBAND linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idasFoodWeb_bnd_omp * To specify the number of threads at the command line, use * % ./idasFoodWeb_bnd_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. 
Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. */ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. 
*/ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype *x1, realtype *x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* *-------------------------------------------------------------------- * MAIN PROGRAM *-------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS enviroment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. */ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1); cp = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1); id = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1); retval = IDASetUserData(ida_mem, webdata); if(check_retval(&retval, "IDASetUserData", 1)) return(1); retval = IDASetId(ida_mem, id); if(check_retval(&retval, "IDASetId", 1)) return(1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if(check_retval(&retval, "IDAInit", 1)) return(1); retval = IDASStolerances(ida_mem, rtol, atol); if(check_retval(&retval, "IDASStolerances", 1)) return(1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); LS = SUNLinSol_Band(cc, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); retval = IDASetLinearSolver(ida_mem, LS, A); if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if(check_retval(&retval, "IDACalcIC", 1)) return(1); /* Print heading, basic parameters, and initial values. */ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. 
*/ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if(check_retval(&retval, "IDASolve", 1)) return(retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); return(0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* *-------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA *-------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. * This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. */ #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc+is] = cpv[loc+is] - resv[loc+is]; else resv[loc+is] = -resv[loc+is]; } } } return(0); } /* *-------------------------------------------------------------------- * PRIVATE FUNCTIONS *-------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { sunindextype i, j, np; realtype *a1,*a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX/(MX-1); webdata->dy = AY/(MY-1); webdata->Neq= NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i+np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i+np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i+np][i+np] = -AA; /* Set coefficients for b and diffusion terms. */ bcoef[i] = BB; bcoef[i+np] = -BB; cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2; coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. * A polynomial profile is used for the prey cc values, and a constant * (1.0e5) is loaded as the initial guess for the predator cc values. * The id values are set to 1 for the prey and 0 for the predators. 
* The prey cp values are set according to the given system, and * the predator cp values are set to zero. */ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int) mu, (long int) ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" | nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. * Selected run statistics are printed. Then values of the concentrations * are printed for the bottom left and top right grid points only. */ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c,0,0); c_tr = IJ_Vptr(c,MX-1,MY-1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. 
*/ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre+nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. * This routine computes the right-hand sides of the system equations, * consisting of the diffusion term and interaction term. * The interaction term is computed by the function WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* Loop over grid points, evaluate interaction vector (length ns), form diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy ; idyu = (jy!=MY-1) ? NSMX : -NSMX; idyl = (jy!= 0 ) ? NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc,jx,jy); ratesxy = IJ_Vptr(webdata->rates,jx,jy); cratexy = IJ_Vptr(crate,jx,jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. */ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy+is) - *(cxy - idyl + is) ; dcyui = *(cxy + idyu + is) - *(cxy+is); /* Differencing in x. */ dcxli = *(cxy+is) - *(cxy - idxl + is); dcxui = *(cxy + idxu +is) - *(cxy+is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. * At a given (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] ); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. 
*/ static realtype dotprod(sunindextype size, realtype *x1, realtype *x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return(temp); } /* * Check function return value... * opt == 0 means SUNDIALS function allocates memory so check if * returned NULL pointer * opt == 1 means SUNDIALS function returns an integer value so check if * retval < 0 * opt == 2 means function allocates memory so check if returned * NULL pointer */ static int check_retval(void *returnvalue, char *funcname, int opt) { int *retval; if (opt == 0 && returnvalue == NULL) { /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } else if (opt == 1) { /* Check if retval < 0 */ retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); } } else if (opt == 2 && returnvalue == NULL) { /* Check if function returned NULL pointer - no memory allocated */ fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
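The thread-count selection at the top of main above follows a common OpenMP idiom: default to one thread for serial builds, let omp_get_max_threads() pick up OMP_NUM_THREADS when OpenMP is compiled in, and let a command-line argument override both. The same logic distilled into a standalone sketch:

#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(int argc, char *argv[])
{
  int num_threads = 1;                  /* serial fallback */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();  /* honors OMP_NUM_THREADS */
#endif
  if (argc > 1)                         /* command line overrides both */
    num_threads = (int)strtol(argv[1], NULL, 0);
  printf("using %d thread(s)\n", num_threads);
  return 0;
}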
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05)/* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0)/* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. IJ_Vptr is defined in * order to express the underlying 3-D structure of the dependent variable * vector from its underlying 1-D storage (an N_Vector). IJ_Vptr(vv,i,j) * returns a pointer to the location in vv corresponding to species index is * = 0, x-index ix = i, and y-index jy = j. */ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. 
*/ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype * cxy, realtype * ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype * x1, realtype * x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* * -------------------------------------------------------------------- MAIN * PROGRAM * -------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ if (argc > 1) /* overwrite with command line value, if * supplied */ num_threads = (int)strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. */ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if (check_retval((void *)cc, "N_VNew_OpenMP", 0)) return (1); cp = N_VNew_OpenMP(NEQ, num_threads); if (check_retval((void *)cp, "N_VNew_OpenMP", 0)) return (1); id = N_VNew_OpenMP(NEQ, num_threads); if (check_retval((void *)id, "N_VNew_OpenMP", 0)) return (1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if (check_retval((void *)ida_mem, "IDACreate", 0)) return (1); retval = IDASetUserData(ida_mem, webdata); if (check_retval(&retval, "IDASetUserData", 1)) return (1); retval = IDASetId(ida_mem, id); if (check_retval(&retval, "IDASetId", 1)) return (1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if (check_retval(&retval, "IDAInit", 1)) return (1); retval = IDASStolerances(ida_mem, rtol, atol); if (check_retval(&retval, "IDASStolerances", 1)) return (1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml); if (check_retval((void *)A, "SUNBandMatrix", 0)) return (1); LS = SUNLinSol_Band(cc, A); if (check_retval((void *)LS, "SUNLinSol_Band", 0)) return (1); retval = IDASetLinearSolver(ida_mem, LS, A); if (check_retval(&retval, "IDASetLinearSolver", 1)) return (1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if (check_retval(&retval, "IDACalcIC", 1)) return (1); /* Print heading, basic parameters, and initial values. */ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. 
*/ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if (check_retval(&retval, "IDASolve", 1)) return (retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); return (0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* * -------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA * -------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. This routine * calls Fweb to get all the right-hand sides of the equations, then loads * the residual vector accordingly, using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData) user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* * Loop over all grid points, setting residual values appropriately for * differential or algebraic components. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc + is] = cpv[loc + is] - resv[loc + is]; else resv[loc + is] = -resv[loc + is]; } } } return (0); } /* * -------------------------------------------------------------------- * PRIVATE FUNCTIONS * -------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { sunindextype i, j, np; realtype *a1, *a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX / (MX - 1); webdata->dy = AY / (MY - 1); webdata->Neq = NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx) * (webdata->dx); dy2 = (webdata->dy) * (webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i + np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i + np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i + np][i + np] = -AA; /* Set coefficients for b and diffusion terms. */ bcoef[i] = BB; bcoef[i + np] = -BB; cox[i] = DPREY / dx2; cox[i + np] = DPRED / dx2; coy[i] = DPREY / dy2; coy[i + np] = DPRED / dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. A polynomial * profile is used for the prey cc values, and a constant (1.0e5) is loaded * as the initial guess for the predator cc values. The id values are set to * 1 for the prey and 0 for the predators. The prey cp values are set * according to the given system, and the predator cp values are set to zero. 
*/ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0) * xx * (ONE - xx) * yy * (ONE - yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc + is] = RCONST(10.0) + (realtype) (is + 1) * xyfactor; idv[loc + is] = ONE; } else { ccv[loc + is] = RCONST(1.0e5); idv[loc + is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc + is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int)mu, (long int)ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" | nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. Selected run * statistics are printed. Then values of the concentrations are printed for * the bottom left and top right grid points only. */ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c, 0, 0); c_tr = IJ_Vptr(c, MX - 1, MY - 1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i = 1; i < NUM_SPECIES; i++) printf(" %12.4Le %12.4Le |\n", c_bl[i], c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i = 1; i < NUM_SPECIES; i++) printf(" %12.4e %12.4e |\n", c_bl[i], c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i = 1; i < NUM_SPECIES; i++) printf(" %12.4e %12.4e |\n", c_bl[i], c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. 
*/ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre + nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. This routine computes the * right-hand sides of the system equations, consisting of the diffusion term * and interaction term. The interaction term is computed by the function * WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* * Loop over grid points, evaluate interaction vector (length ns), form * diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy; idyu = (jy != MY - 1) ? NSMX : -NSMX; idyl = (jy != 0) ? NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx != MX - 1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx != 0) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc, jx, jy); ratesxy = IJ_Vptr(webdata->rates, jx, jy); cratexy = IJ_Vptr(crate, jx, jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. */ for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy + is) - *(cxy - idyl + is); dcyui = *(cxy + idyu + is) - *(cxy + is); /* Differencing in x. */ dcxli = *(cxy + is) - *(cxy - idxl + is); dcxui = *(cxy + idxu + is) - *(cxy + is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. At a given * (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype * cxy, realtype * ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA * xx * yy + BETA * sin(FOURPI * xx) * sin(FOURPI * yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is] * (bcoef[is] * fac + ratesxy[is]); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. 
*/ static realtype dotprod(sunindextype size, realtype * x1, realtype * x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return (temp); } /* * Check function return value... opt == 0 means SUNDIALS function allocates * memory so check if returned NULL pointer opt == 1 means SUNDIALS function * returns an integer value so check if retval < 0 opt == 2 means function * allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, char *funcname, int opt) { int *retval; if (opt == 0 && returnvalue == NULL) { /* * Check if SUNDIALS function returned NULL pointer - no memory * allocated */ fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return (1); } else if (opt == 1) { /* Check if retval < 0 */ retval = (int *)returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return (1); } } else if (opt == 2 && returnvalue == NULL) { /* Check if function returned NULL pointer - no memory allocated */ fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return (1); } return (0); }
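/*
 * Editor's sketch (not part of the original example): the residual
 * convention implemented by resweb above. IDA solves F(t, c, c') = 0,
 * so the right-hand sides f(c) computed by Fweb are loaded as
 *   F = c' - f(c)  for the differential (prey) components, and
 *   F = -f(c)      for the algebraic (predator) components.
 * The sizes below are illustrative placeholders, not the example's
 * MX/MY/NUM_SPECIES, and nothing here calls SUNDIALS.
 */
#include <stdio.h>

#define SK_MX 2 /* x grid points (placeholder) */
#define SK_MY 2 /* y grid points (placeholder) */
#define SK_NS 2 /* species per grid point (placeholder) */
#define SK_NP 1 /* differential (prey) species per point */

static void load_residual(const double *cp, double *res)
{
  /* On entry res holds f(c); on exit it holds the DAE residual. */
  int jx, jy, is;
  for (jy = 0; jy < SK_MY; jy++)
    for (jx = 0; jx < SK_MX; jx++) {
      int loc = SK_NS * (jx + SK_MX * jy);
      for (is = 0; is < SK_NS; is++)
        res[loc + is] = (is < SK_NP) ? cp[loc + is] - res[loc + is]
                                     : -res[loc + is];
    }
}

int main(void)
{
  double cp[SK_MX * SK_MY * SK_NS], res[SK_MX * SK_MY * SK_NS];
  int i, n = SK_MX * SK_MY * SK_NS;
  for (i = 0; i < n; i++) {
    cp[i] = 1.0;  /* pretend c' = 1 everywhere */
    res[i] = 0.5; /* pretend Fweb produced f(c) = 0.5 everywhere */
  }
  load_residual(cp, res);
  for (i = 0; i < n; i++) /* prints 0.5 for prey, -0.5 for predators */
    printf("res[%d] = %g\n", i, res[i]);
  return 0;
}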
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05)/* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0)/* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. IJ_Vptr is defined in * order to express the underlying 3-D structure of the dependent variable * vector from its underlying 1-D storage (an N_Vector). IJ_Vptr(vv,i,j) * returns a pointer to the location in vv corresponding to species index is * = 0, x-index ix = i, and y-index jy = j. */ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. 
*/ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype * cxy, realtype * ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype * x1, realtype * x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* * -------------------------------------------------------------------- MAIN * PROGRAM * -------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with * OMP_NUM_THREADS enviroment * variable */ #endif if (argc > 1) /* overwrite with command line value, if * supplied */ num_threads = (int)strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. */ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if (check_retval((void *)cc, "N_VNew_OpenMP", 0)) return (1); cp = N_VNew_OpenMP(NEQ, num_threads); if (check_retval((void *)cp, "N_VNew_OpenMP", 0)) return (1); id = N_VNew_OpenMP(NEQ, num_threads); if (check_retval((void *)id, "N_VNew_OpenMP", 0)) return (1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if (check_retval((void *)ida_mem, "IDACreate", 0)) return (1); retval = IDASetUserData(ida_mem, webdata); if (check_retval(&retval, "IDASetUserData", 1)) return (1); retval = IDASetId(ida_mem, id); if (check_retval(&retval, "IDASetId", 1)) return (1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if (check_retval(&retval, "IDAInit", 1)) return (1); retval = IDASStolerances(ida_mem, rtol, atol); if (check_retval(&retval, "IDASStolerances", 1)) return (1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml); if (check_retval((void *)A, "SUNBandMatrix", 0)) return (1); LS = SUNLinSol_Band(cc, A); if (check_retval((void *)LS, "SUNLinSol_Band", 0)) return (1); retval = IDASetLinearSolver(ida_mem, LS, A); if (check_retval(&retval, "IDASetLinearSolver", 1)) return (1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if (check_retval(&retval, "IDACalcIC", 1)) return (1); /* Print heading, basic parameters, and initial values. */ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. 
*/ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if (check_retval(&retval, "IDASolve", 1)) return (retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); return (0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* * -------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA * -------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. This routine * calls Fweb to get all the right-hand sides of the equations, then loads * the residual vector accordingly, using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData) user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* * Loop over all grid points, setting residual values appropriately for * differential or algebraic components. */ #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc + is] = cpv[loc + is] - resv[loc + is]; else resv[loc + is] = -resv[loc + is]; } } } return (0); } /* * -------------------------------------------------------------------- * PRIVATE FUNCTIONS * -------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { sunindextype i, j, np; realtype *a1, *a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX / (MX - 1); webdata->dy = AY / (MY - 1); webdata->Neq = NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx) * (webdata->dx); dy2 = (webdata->dy) * (webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i + np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i + np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i + np][i + np] = -AA; /* Set coefficients for b and diffusion terms. */ bcoef[i] = BB; bcoef[i + np] = -BB; cox[i] = DPREY / dx2; cox[i + np] = DPRED / dx2; coy[i] = DPREY / dy2; coy[i + np] = DPRED / dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. A polynomial * profile is used for the prey cc values, and a constant (1.0e5) is loaded * as the initial guess for the predator cc values. 
The id values are set to * 1 for the prey and 0 for the predators. The prey cp values are set * according to the given system, and the predator cp values are set to zero. */ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0) * xx * (ONE - xx) * yy * (ONE - yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc + is] = RCONST(10.0) + (realtype) (is + 1) * xyfactor; idv[loc + is] = ONE; } else { ccv[loc + is] = RCONST(1.0e5); idv[loc + is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc + is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int)mu, (long int)ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" | nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. Selected run * statistics are printed. Then values of the concentrations are printed for * the bottom left and top right grid points only. 
*/ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c, 0, 0); c_tr = IJ_Vptr(c, MX - 1, MY - 1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i = 1; i < NUM_SPECIES; i++) printf(" %12.4Le %12.4Le |\n", c_bl[i], c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i = 1; i < NUM_SPECIES; i++) printf(" %12.4e %12.4e |\n", c_bl[i], c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i = 1; i < NUM_SPECIES; i++) printf(" %12.4e %12.4e |\n", c_bl[i], c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. */ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre + nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. This routine computes the * right-hand sides of the system equations, consisting of the diffusion term * and interaction term. The interaction term is computed by the function * WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* * Loop over grid points, evaluate interaction vector (length ns), form * diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy; idyu = (jy != MY - 1) ? NSMX : -NSMX; idyl = (jy != 0) ? NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx != MX - 1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx != 0) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc, jx, jy); ratesxy = IJ_Vptr(webdata->rates, jx, jy); cratexy = IJ_Vptr(crate, jx, jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. 
*/ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy + is) - *(cxy - idyl + is); dcyui = *(cxy + idyu + is) - *(cxy + is); /* Differencing in x. */ dcxli = *(cxy + is) - *(cxy - idxl + is); dcxui = *(cxy + idxu + is) - *(cxy + is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. At a given * (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype * cxy, realtype * ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA * xx * yy + BETA * sin(FOURPI * xx) * sin(FOURPI * yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is] * (bcoef[is] * fac + ratesxy[is]); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. */ static realtype dotprod(sunindextype size, realtype * x1, realtype * x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return (temp); } /* * Check function return value... opt == 0 means SUNDIALS function allocates * memory so check if returned NULL pointer opt == 1 means SUNDIALS function * returns an integer value so check if retval < 0 opt == 2 means function * allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, char *funcname, int opt) { int *retval; if (opt == 0 && returnvalue == NULL) { /* * Check if SUNDIALS function returned NULL pointer - no memory * allocated */ fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return (1); } else if (opt == 1) { /* Check if retval < 0 */ retval = (int *)returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return (1); } } else if (opt == 2 && returnvalue == NULL) { /* Check if function returned NULL pointer - no memory allocated */ fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return (1); } return (0); }
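/*
 * Editor's sketch (not part of the original example): the OpenMP clause
 * pattern used by the parallel resweb and Fweb above -- parallelize the
 * outer loop, list every inner index in private() so each thread keeps
 * its own counters, and pin the thread count the way the example pins
 * webdata->nthreads. Array sizes and the per-element work below are
 * placeholders. Compile with, e.g., cc -fopenmp.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define SK_ROWS 4
#define SK_COLS 8

int main(void)
{
  static double a[SK_ROWS * SK_COLS];
  int jy, jx, loc;
  int nthreads = 2; /* stands in for webdata->nthreads */

#pragma omp parallel for default(shared) private(jy, jx, loc) schedule(static) num_threads(nthreads)
  for (jy = 0; jy < SK_ROWS; jy++) {   /* rows are split across threads */
    for (jx = 0; jx < SK_COLS; jx++) { /* inner indices stay per-thread */
      loc = jy * SK_COLS + jx;
      a[loc] = (double) loc;
    }
  }

  printf("a[last] = %g\n", a[SK_ROWS * SK_COLS - 1]);
  return 0;
}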
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Typedef declarations. 
*/ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression, ExceptionInfo *exception) { const Image *next; FxInfo *fx_info; register ssize_t i; unsigned char fx_op[2]; fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void) memset(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; /* Convert compound to simple operators. 
*/ fx_op[1]='\0'; *fx_op=(unsigned char) BitwiseAndAssignmentOperator; (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op); *fx_op=(unsigned char) BitwiseOrAssignmentOperator; (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op); *fx_op=(unsigned char) LeftShiftAssignmentOperator; (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op); *fx_op=(unsigned char) RightShiftAssignmentOperator; (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op); *fx_op=(unsigned char) PowerAssignmentOperator; (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op); *fx_op=(unsigned char) ModuloAssignmentOperator; (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op); *fx_op=(unsigned char) PlusAssignmentOperator; (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op); *fx_op=(unsigned char) SubtractAssignmentOperator; (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op); *fx_op=(unsigned char) MultiplyAssignmentOperator; (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op); *fx_op=(unsigned char) DivideAssignmentOperator; (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op); *fx_op=(unsigned char) IncrementAssignmentOperator; (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op); *fx_op=(unsigned char) DecrementAssignmentOperator; (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op); *fx_op=(unsigned char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op); *fx_op=(unsigned char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op); *fx_op=(unsigned char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op); *fx_op=(unsigned char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op); *fx_op=(unsigned char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op); *fx_op=(unsigned char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op); *fx_op=(unsigned char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op); *fx_op=(unsigned char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op); *fx_op=(unsigned char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op); /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info. 
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { register ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. % */ static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info, const char *symbol) { return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol)); } static inline MagickBooleanType SetFxSymbolValue( FxInfo *magick_restrict fx_info,const char *magick_restrict symbol, double const value) { double *object; object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol); if (object != (double *) NULL) { *object=value; return(MagickTrue); } object=(double *) AcquireQuantumMemory(1,sizeof(*object)); if (object == (double *) NULL) { (void) ThrowMagickException(fx_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", fx_info->images->filename); return(MagickFalse); } *object=value; return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object)); } static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent]; const double *value; double statistic; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1UL << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=GetFxSymbolValue(fx_info,key); if (value != (const double *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*(*value)); } statistic=0.0; if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); statistic=(double) depth; } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); statistic=kurtosis; } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) 
GetImageRange(image,&minima,&maxima,exception); statistic=maxima; } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); statistic=mean; } if (LocaleNCompare(symbol,"median",6) == 0) { double median; (void) GetImageMedian(image,&median,exception); statistic=median; } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); statistic=minima; } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); statistic=skewness; } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); statistic=standard_deviation; } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse) return(0.0); return(QuantumScale*statistic); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name,const size_t length) { int c; register size_t i; for (i=0; i <= length; i++) if (expression[i] == '\0') return(MagickFalse); c=expression[length]; if ((LocaleNCompare(expression,name,length) == 0) && ((isspace(c) == 0) || (c == '('))) return(MagickTrue); return(MagickFalse); } static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression,const size_t depth, ExceptionInfo *exception) { char *q, symbol[MagickPathExtent]; const char *p; const double *value; double alpha, beta; Image *image; MagickBooleanType status; PixelInfo pixel; PointInfo point; register ssize_t i; size_t level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { char *subexpression; subexpression=AcquireString(expression); if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == 
'{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, depth,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression=DestroyString(subexpression); } image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } i=GetImageIndexInList(image); GetPixelInfo(image,&pixel); status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; size_t length; (void) CopyMagickString(name,p,MagickPathExtent); length=strlen(name); for (q=name+length-1; q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } q=name; if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') && (GetFxSymbolValue(fx_info,name) == (const double *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=length; } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors, ConstantString(name),ClonePixelInfo(&pixel)); p+=length; } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case CompositePixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } case IndexPixelChannel: return(0.0); default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol,"channel",7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 
0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol,"extent") == 0) { if (image->extent != 0) return((double) image->extent); return((double) GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminence; 
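/* Editor's note: the 0.212656/0.715158/0.072186 weights used here (and
   for "luma" above) appear to be the Rec.709 luma coefficients; applied
   to the stored, typically gamma-encoded channel values they yield luma
   rather than true linear-light luminance. */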
luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"median",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) image->page.y); if (LocaleCompare(symbol,"printsize.x") == 0) return(PerceptibleReciprocal(image->resolution.x)*image->columns); if (LocaleCompare(symbol,"printsize.y") == 0) return(PerceptibleReciprocal(image->resolution.y)*image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double) GetImageDepth(image,fx_info->exception)); break; } default: break; } value=GetFxSymbolValue(fx_info,symbol); if (value != (const double *) NULL) return(*value); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UndefinedVariable","`%s'",symbol); (void) SetFxSymbolValue(fx_info,symbol,0.0); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char 
*subexpression; register int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { expression+=5; break; } #endif if (IsFxFunction(expression,"atan2",5) != MagickFalse) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression,"j0",2) != MagickFalse) || (IsFxFunction(expression,"j1",2) != MagickFalse)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit(c) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence=AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || 
(precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,const size_t depth,double *beta, ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } #define FxParseConditional(subexpression,sentinal,p,q) \ { \ p=subexpression; \ for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \ if (*q == '(') \ { \ for (q++; (*q != ')') && (*q != '\0'); q++); \ if (*q == '\0') \ break; \ } \ if (*q == '\0') \ { \ (void) ThrowMagickException(exception,GetMagickModule(), \ OptionError,"UnableToParseExpression","`%s'",subexpression); \ FxReturn(0.0); \ } \ if (strlen(q) == 1) \ *(q+1)='\0'; \ *q='\0'; \ } char *q, *subexpression; double alpha, gamma, sans, value; register const char *p; *beta=0.0; sans=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(PerceptibleReciprocal(*beta)*alpha); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case BitwiseAndAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (*beta+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5)); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } 
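/* Editor's note: every compound-assignment case follows the same shape
   seen above -- verify the left-hand side (subexpression) is a bare
   identifier, evaluate the right-hand side recursively, apply the
   operator, then persist the result via SetFxSymbolValue before
   returning. */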
ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=pow(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=fmod(alpha,*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha*PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha+1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=alpha-1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info,p,value) == MagickFalse) return(0.0); } else if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) 
(gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent-1); FxParseConditional(subexpression,':',p,q); if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(gamma); } case '=': { q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); value=(*beta); if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse) return(0.0); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); length=CopyMagickString(subexpression,expression+1,MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (IsFxFunction(expression,"abs",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression,"acosh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression,"acos",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"airy",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression,"asinh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression,"asin",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression,"alt",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"atan2",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression,"atanh",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression,"atan",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression,"ceil",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression,"clamp",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression,"cosh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression,"cos",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression,"debug",5) != MagickFalse) { const char *type; size_t length; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="alpha"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type="gray"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="alpha"; break; default: type="unknown"; break; } break; } } *subexpression='\0'; length=1; if (strlen(expression) > 6) length=CopyMagickString(subexpression,expression+6, MagickPathExtent); if (length != 0) subexpression[length-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(alpha); } if (IsFxFunction(expression,"do",2) != MagickFalse) { size_t length; /* Parse do(expression,condition test). 
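  Note, as split below: the argument before the comma is the loop test
  and the argument after it is the body; each pass evaluates the body
  first and the test second, so the body always runs at least once.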
*/ length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression,"drc",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression,"erf",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression,"exp",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression,"floor",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"for",3) != MagickFalse) { double sans = 0.0; size_t length; /* Parse for(initialization, condition test, expression). */ length=CopyMagickString(subexpression,expression+4, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression,"gauss",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI)); } if (IsFxFunction(expression,"gcd",3) != MagickFalse) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); FxReturn((double) gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"hypot",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression,"if",2) != MagickFalse) { double sans = 0.0; size_t length; length=CopyMagickString(subexpression,expression+3, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; 
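/*
  Parse if(condition,true expression,false expression): the condition
  is split off at the first top-level comma and evaluated, then only
  the selected branch is evaluated, so the untaken branch never runs.
*/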
FxParseConditional(subexpression,',',p,q); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); (void) CopyMagickString(subexpression,q+1,MagickPathExtent-1); FxParseConditional(subexpression,',',p,q); if (fabs(alpha) >= MagickEpsilon) alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta, exception); FxReturn(alpha); } if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"int",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression,"isnan",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression,"j0",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"j1",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression,"jinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression,"ln",2) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (IsFxFunction(expression,"logtwo",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (IsFxFunction(expression,"log",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (IsFxFunction(expression,"max",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (IsFxFunction(expression,"min",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? 
alpha : *beta); } if (IsFxFunction(expression,"mod",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta)); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression,"not",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression,"pow",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression,"rand",4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression,"round",5) != MagickFalse) { /* Round the fraction to nearest integer. */ alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if ((alpha-floor(alpha)) < (ceil(alpha)-alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); if (IsFxFunction(expression,"sign",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (IsFxFunction(expression,"sinc",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha)); } if (IsFxFunction(expression,"sinh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression,"sin",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression,"sqrt",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression,"squish",6) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'T': case 't': { if (IsFxFunction(expression,"tanh",4) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression,"tan",3) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression,"trunc",5) != MagickFalse) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression,"while",5) != MagickFalse) { size_t length; /* Parse while(condition test, expression). 
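  The test (before the comma) runs first on every pass, so the body may
  run zero times; the last body value, or 0.0 if the body never ran, is
  returned.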
*/ length=CopyMagickString(subexpression,expression+6, MagickPathExtent-1); if (length != 0) subexpression[length-1]='\0'; FxParseConditional(subexpression,',',p,q); for (alpha=0.0; ; ) { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1, beta,exception); } FxReturn(alpha); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception)); break; } default: break; } subexpression=DestroyString(subexpression); q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); fx_info->file=file; return(status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. 
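%
%  Each selected channel of every pixel is replaced by the expression
%  value scaled by QuantumRange and clamped to the quantum range.
%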
% */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; double alpha; FxInfo **fx_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (expression == (const char *) NULL) return(CloneImage(image,0,0,MagickTrue,exception)); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,0,0,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. 
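  Rows are processed in parallel; each OpenMP thread uses its own FxInfo
  (indexed by GetOpenMPThreadId) because evaluating an expression mutates
  the per-thread symbol table.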
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FxImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); }
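/*
  Illustrative only, not part of fx.c: a minimal sketch of driving
  FxImage() from client code.  The program name, the input filename
  "input.png", and the sample expression are assumptions made for the
  example, not taken from this source.  Guarded with #if 0 so it cannot
  collide with anything in this translation unit.
*/
#if 0
#include "MagickCore/MagickCore.h"

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *fx_image,
    *image;

  ImageInfo
    *image_info;

  MagickCoreGenesis("fx-demo",MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /* Average each pixel with its right-hand neighbor, per channel. */
      fx_image=FxImage(image,"(u+p[1,0])/2",exception);
      if (fx_image != (Image *) NULL)
        fx_image=DestroyImage(fx_image);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif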
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* * Typedef declarations. */ typedef enum { BitwiseAndAssignmentOperator = 0xd9U, BitwiseOrAssignmentOperator, LeftShiftAssignmentOperator, RightShiftAssignmentOperator, PowerAssignmentOperator, ModuloAssignmentOperator, PlusAssignmentOperator, SubtractAssignmentOperator, MultiplyAssignmentOperator, DivideAssignmentOperator, IncrementAssignmentOperator, DecrementAssignmentOperator, LeftShiftOperator, RightShiftOperator, LessThanEqualOperator, GreaterThanEqualOperator, EqualOperator, NotEqualOperator, LogicalAndOperator, LogicalOrOperator, ExponentialNotation } FxOperator; struct _FxInfo { const Image * images; char *expression; FILE * file; SplayTreeInfo * colors, *symbols; CacheView ** view; RandomInfo * random_info; ExceptionInfo * exception; }; /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + A c q u i r e F x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format * of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image * *images,const char *expression, % ExceptionInfo *exception) % % A * description of each parameter follows: % % o images: the image * sequence. % % o expression: the expression. % % o exception: return * any errors or warnings in this structure. 
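* % % The caller's expression is copied and then rewritten in place:
* each multi-character operator (for example "&&", "<=", "**") is
* replaced by a single-byte opcode so the evaluator can dispatch on
* one character; see the SubstituteString() calls below.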
% */ MagickPrivate FxInfo * AcquireFxInfo(const Image * images, const char *expression, ExceptionInfo * exception) { const Image * next; FxInfo * fx_info; register ssize_t i; unsigned char fx_op[2]; fx_info = (FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void)memset(fx_info, 0, sizeof(*fx_info)); fx_info->exception = AcquireExceptionInfo(); fx_info->images = images; fx_info->colors = NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols = NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view = (CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images), sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); i = 0; next = GetFirstImageInList(fx_info->images); for (; next != (Image *) NULL; next = next->next) { fx_info->view[i] = AcquireVirtualCacheView(next, exception); i++; } fx_info->random_info = AcquireRandomInfo(); fx_info->expression = ConstantString(expression); fx_info->file = stderr; /* * Convert compound to simple operators. */ fx_op[1] = '\0'; *fx_op = (unsigned char)BitwiseAndAssignmentOperator; (void)SubstituteString(&fx_info->expression, "&=", (char *)fx_op); *fx_op = (unsigned char)BitwiseOrAssignmentOperator; (void)SubstituteString(&fx_info->expression, "|=", (char *)fx_op); *fx_op = (unsigned char)LeftShiftAssignmentOperator; (void)SubstituteString(&fx_info->expression, "<<=", (char *)fx_op); *fx_op = (unsigned char)RightShiftAssignmentOperator; (void)SubstituteString(&fx_info->expression, ">>=", (char *)fx_op); *fx_op = (unsigned char)PowerAssignmentOperator; (void)SubstituteString(&fx_info->expression, "^=", (char *)fx_op); *fx_op = (unsigned char)ModuloAssignmentOperator; (void)SubstituteString(&fx_info->expression, "%=", (char *)fx_op); *fx_op = (unsigned char)PlusAssignmentOperator; (void)SubstituteString(&fx_info->expression, "+=", (char *)fx_op); *fx_op = (unsigned char)SubtractAssignmentOperator; (void)SubstituteString(&fx_info->expression, "-=", (char *)fx_op); *fx_op = (unsigned char)MultiplyAssignmentOperator; (void)SubstituteString(&fx_info->expression, "*=", (char *)fx_op); *fx_op = (unsigned char)DivideAssignmentOperator; (void)SubstituteString(&fx_info->expression, "/=", (char *)fx_op); *fx_op = (unsigned char)IncrementAssignmentOperator; (void)SubstituteString(&fx_info->expression, "++", (char *)fx_op); *fx_op = (unsigned char)DecrementAssignmentOperator; (void)SubstituteString(&fx_info->expression, "--", (char *)fx_op); *fx_op = (unsigned char)LeftShiftOperator; (void)SubstituteString(&fx_info->expression, "<<", (char *)fx_op); *fx_op = (unsigned char)RightShiftOperator; (void)SubstituteString(&fx_info->expression, ">>", (char *)fx_op); *fx_op = (unsigned char)LessThanEqualOperator; (void)SubstituteString(&fx_info->expression, "<=", (char *)fx_op); *fx_op = (unsigned char)GreaterThanEqualOperator; (void)SubstituteString(&fx_info->expression, ">=", (char *)fx_op); *fx_op = (unsigned char)EqualOperator; (void)SubstituteString(&fx_info->expression, "==", (char *)fx_op); *fx_op = (unsigned char)NotEqualOperator; (void)SubstituteString(&fx_info->expression, "!=", (char *)fx_op); *fx_op = (unsigned char)LogicalAndOperator; (void)SubstituteString(&fx_info->expression, "&&", (char *)fx_op); *fx_op = (unsigned char)LogicalOrOperator; (void)SubstituteString(&fx_info->expression, "||", (char *)fx_op); *fx_op = (unsigned char)ExponentialNotation; 
(void)SubstituteString(&fx_info->expression, "**", (char *)fx_op); /* * Force right-to-left associativity for unary negation. */ (void)SubstituteString(&fx_info->expression, "-", "-1.0*"); (void)SubstituteString(&fx_info->expression, "^-1.0*", "^-"); (void)SubstituteString(&fx_info->expression, "E-1.0*", "E-"); (void)SubstituteString(&fx_info->expression, "e-1.0*", "e-"); (void)SubstituteString(&fx_info->expression, " ", ""); /* compact string */ return (fx_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y F x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo * structure. % % The format of the DestroyFxInfo method is: % % * ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each * parameter follows: % % o fx_info: the fx info. % */ MagickPrivate FxInfo * DestroyFxInfo(FxInfo * fx_info) { register ssize_t i; fx_info->exception = DestroyExceptionInfo(fx_info->exception); fx_info->expression = DestroyString(fx_info->expression); fx_info->symbols = DestroySplayTree(fx_info->symbols); fx_info->colors = DestroySplayTree(fx_info->colors); for (i = (ssize_t) GetImageListLength(fx_info->images) - 1; i >= 0; i--) fx_info->view[i] = DestroyCacheView(fx_info->view[i]); fx_info->view = (CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info = DestroyRandomInfo(fx_info->random_info); fx_info = (FxInfo *) RelinquishMagickMemory(fx_info); return (fx_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + F x E v a l u a t e C h a n n e l E x p r e s s i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FxEvaluateChannelExpression() evaluates an expression and * returns the % results. % % The format of the FxEvaluateExpression method * is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % * const PixelChannel channel,const ssize_t x,const ssize_t y, % * double *alpha,Exceptioninfo *exception) % double * FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo * *exception) % % A description of each parameter follows: % % o * fx_info: the fx info. % % o channel: the channel. % % o x,y: the * pixel position. % % o alpha: the result. % % o exception: return any * errors or warnings in this structure. 
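* % % The result is returned through *alpha; when the expression cannot
* be parsed an OptionError is raised in the exception structure and
* MagickFalse is returned.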
% */ static inline const double * GetFxSymbolValue(FxInfo * magick_restrict fx_info, const char *symbol) { return ((const double *)GetValueFromSplayTree(fx_info->symbols, symbol)); } static inline MagickBooleanType SetFxSymbolValue( FxInfo * magick_restrict fx_info, const char *magick_restrict symbol, double const value) { double *object; object = (double *)GetValueFromSplayTree(fx_info->symbols, symbol); if (object != (double *)NULL) { *object = value; return (MagickTrue); } object = (double *)AcquireQuantumMemory(1, sizeof(*object)); if (object == (double *)NULL) { (void)ThrowMagickException(fx_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", fx_info->images->filename); return (MagickFalse); } *object = value; return (AddValueToSplayTree(fx_info->symbols, ConstantString(symbol), object)); } static double FxChannelStatistics(FxInfo * fx_info, Image * image, PixelChannel channel, const char *symbol, ExceptionInfo * exception) { ChannelType channel_mask; char key[MagickPathExtent]; const double *value; double statistic; register const char *p; channel_mask = UndefinedChannel; for (p = symbol; (*p != '.') && (*p != '\0'); p++); if (*p == '.') { ssize_t option; option = ParseCommandOption(MagickPixelChannelOptions, MagickTrue, p + 1); if (option >= 0) { channel = (PixelChannel) option; channel_mask = SetPixelChannelMask(image, (ChannelType) (1UL << channel)); } } (void)FormatLocaleString(key, MagickPathExtent, "%p.%.20g.%s", (void *)image, (double)channel, symbol); value = GetFxSymbolValue(fx_info, key); if (value != (const double *)NULL) { if (channel_mask != UndefinedChannel) (void)SetPixelChannelMask(image, channel_mask); return (QuantumScale * (*value)); } statistic = 0.0; if (LocaleNCompare(symbol, "depth", 5) == 0) { size_t depth; depth = GetImageDepth(image, exception); statistic = (double)depth; } if (LocaleNCompare(symbol, "kurtosis", 8) == 0) { double kurtosis, skewness; (void)GetImageKurtosis(image, &kurtosis, &skewness, exception); statistic = kurtosis; } if (LocaleNCompare(symbol, "maxima", 6) == 0) { double maxima, minima; (void)GetImageRange(image, &minima, &maxima, exception); statistic = maxima; } if (LocaleNCompare(symbol, "mean", 4) == 0) { double mean, standard_deviation; (void)GetImageMean(image, &mean, &standard_deviation, exception); statistic = mean; } if (LocaleNCompare(symbol, "median", 6) == 0) { double median; (void)GetImageMedian(image, &median, exception); statistic = median; } if (LocaleNCompare(symbol, "minima", 6) == 0) { double maxima, minima; (void)GetImageRange(image, &minima, &maxima, exception); statistic = minima; } if (LocaleNCompare(symbol, "skewness", 8) == 0) { double kurtosis, skewness; (void)GetImageKurtosis(image, &kurtosis, &skewness, exception); statistic = skewness; } if (LocaleNCompare(symbol, "standard_deviation", 18) == 0) { double mean, standard_deviation; (void)GetImageMean(image, &mean, &standard_deviation, exception); statistic = standard_deviation; } if (channel_mask != UndefinedChannel) (void)SetPixelChannelMask(image, channel_mask); if (SetFxSymbolValue(fx_info, key, statistic) == MagickFalse) return (0.0); return (QuantumScale * statistic); } static double FxEvaluateSubexpression(FxInfo *, const PixelChannel, const ssize_t, const ssize_t, const char *, const size_t, double *, ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name, const size_t length) { int c; register size_t i; for (i = 0; i <= length; i++) if (expression[i] == '\0') return 
(MagickFalse); c = expression[length]; if ((LocaleNCompare(expression, name, length) == 0) && ((isspace(c) == 0) || (c == '('))) return (MagickTrue); return (MagickFalse); } static MagickOffsetType FxGCD(MagickOffsetType alpha, MagickOffsetType beta) { if (beta != 0) return (FxGCD(beta, alpha % beta)); return (alpha); } static inline const char * FxSubexpression(const char *expression, ExceptionInfo * exception) { const char *subexpression; register ssize_t level; level = 0; subexpression = expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")", (int)*subexpression) == (char *)NULL))) { if (strchr("(", (int)*subexpression) != (char *)NULL) level++; else if (strchr(")", (int)*subexpression) != (char *)NULL) level--; subexpression++; } if (*subexpression == '\0') (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnbalancedParenthesis", "`%s'", expression); return (subexpression); } static double FxGetSymbol(FxInfo * fx_info, const PixelChannel channel, const ssize_t x, const ssize_t y, const char *expression, const size_t depth, ExceptionInfo * exception) { char *q, symbol[MagickPathExtent]; const char *p; const double *value; double alpha, beta; Image * image; MagickBooleanType status; PixelInfo pixel; PointInfo point; register ssize_t i; size_t level; p = expression; i = GetImageIndexInList(fx_info->images); level = 0; point.x = (double)x; point.y = (double)y; if (isalpha((int)((unsigned char)*(p + 1))) == 0) { char *subexpression; subexpression = AcquireString(expression); if (strchr("suv", (int)*p) != (char *)NULL) { switch (*p) { case 's': default: { i = GetImageIndexInList(fx_info->images); break; } case 'u': i = 0; break; case 'v': i = 1; break; } p++; if (*p == '[') { level++; q = subexpression; for (p++; *p != '\0';) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++ = (*p++); } *q = '\0'; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth, &beta, exception); i = (ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int)((unsigned char)*(p + 1))) == 0)) { p++; if (*p == '{') { level++; q = subexpression; for (p++; *p != '\0';) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++ = (*p++); } *q = '\0'; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth, &beta, exception); point.x = alpha; point.y = beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q = subexpression; for (p++; *p != '\0';) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++ = (*p++); } *q = '\0'; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth, &beta, exception); point.x += alpha; point.y += beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression = DestroyString(subexpression); } image = GetImageFromList(fx_info->images, i); if (image == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "NoSuchImage", "`%s'", expression); return (0.0); } i = GetImageIndexInList(image); GetPixelInfo(image, &pixel); status = InterpolatePixelInfo(image, fx_info->view[i], image->interpolate, point.x, point.y, &pixel, exception); (void)status; if ((*p != '\0') && (*(p + 1) != '\0') && (*(p + 2) != '\0') && (LocaleCompare(p, "intensity") != 0) && (LocaleCompare(p, "luma") != 0) && (LocaleCompare(p, "luminance") != 0) && (LocaleCompare(p, "hue") != 0) && (LocaleCompare(p, "saturation") != 0) && (LocaleCompare(p, "lightness") 
!= 0)) { char name[MagickPathExtent]; size_t length; (void)CopyMagickString(name, p, MagickPathExtent); length = strlen(name); for (q = name + length - 1; q > name; q--) { if (*q == ')') break; if (*q == '.') { *q = '\0'; break; } } q = name; if ((*q != '\0') && (*(q + 1) != '\0') && (*(q + 2) != '\0') && (GetFxSymbolValue(fx_info, name) == (const double *)NULL)) { PixelInfo * color; color = (PixelInfo *) GetValueFromSplayTree(fx_info->colors, name); if (color != (PixelInfo *) NULL) { pixel = (*color); p += length; } else { MagickBooleanType status; status = QueryColorCompliance(name, AllCompliance, &pixel, fx_info->exception); if (status != MagickFalse) { (void)AddValueToSplayTree(fx_info->colors, ConstantString(name), ClonePixelInfo(&pixel)); p += length; } } } } (void)CopyMagickString(symbol, p, MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return (QuantumScale * pixel.red); case GreenPixelChannel: return (QuantumScale * pixel.green); case BluePixelChannel: return (QuantumScale * pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ColorSeparatedImageRequired", "`%s'", image->filename); return (0.0); } return (QuantumScale * pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return (1.0); alpha = (double)(QuantumScale * pixel.alpha); return (alpha); } case CompositePixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image, &pixel, quantum_pixel); return (QuantumScale * GetPixelIntensity(image, quantum_pixel)); } case IndexPixelChannel: return (0.0); default: break; } (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", p); return (0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol, "a") == 0) return ((QuantumScale * pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol, "b") == 0) return (QuantumScale * pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol, "channel", 7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags = ParseGeometry(symbol + 7, &channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return (0.0); return (channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return (0.0); return (channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return (0.0); return (channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return (0.0); return (channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return (0.0); return (channel_info.chi); } default: return (0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return (0.0); return (channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return (0.0); return (channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return (0.0); return (channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return (0.0); return (channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return (0.0); return (channel_info.psi); } default: return (0.0); } } if (LocaleCompare(symbol, "c") == 0) return (QuantumScale * pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol, "depth", 5) == 0) return (FxChannelStatistics(fx_info, 
image, channel, symbol, exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol, "extent") == 0) { if (image->extent != 0) return ((double)image->extent); return ((double)GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol, "g") == 0) return (QuantumScale * pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol, "kurtosis", 8) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleCompare(symbol, "k") == 0) { if (image->colorspace != CMYKColorspace) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ColorSeparatedImageRequired", "`%s'", image->filename); return (0.0); } return (QuantumScale * pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol, "h") == 0) return ((double)image->rows); if (LocaleCompare(symbol, "hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red, pixel.green, pixel.blue, &hue, &saturation, &lightness); return (hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol, "image.depth") == 0) || (LocaleCompare(symbol, "image.minima") == 0) || (LocaleCompare(symbol, "image.maxima") == 0) || (LocaleCompare(symbol, "image.mean") == 0) || (LocaleCompare(symbol, "image.kurtosis") == 0) || (LocaleCompare(symbol, "image.skewness") == 0) || (LocaleCompare(symbol, "image.standard_deviation") == 0)) return (FxChannelStatistics(fx_info, image, channel, symbol + 6, exception)); if (LocaleCompare(symbol, "image.resolution.x") == 0) return (image->resolution.x); if (LocaleCompare(symbol, "image.resolution.y") == 0) return (image->resolution.y); if (LocaleCompare(symbol, "intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image, &pixel, quantum_pixel); return (QuantumScale * GetPixelIntensity(image, quantum_pixel)); } if (LocaleCompare(symbol, "i") == 0) return ((double)x); break; } case 'J': case 'j': { if (LocaleCompare(symbol, "j") == 0) return ((double)y); break; } case 'L': case 'l': { if (LocaleCompare(symbol, "lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red, pixel.green, pixel.blue, &hue, &saturation, &lightness); return (lightness); } if (LocaleCompare(symbol, "luma") == 0) { double luma; luma = 0.212656 * pixel.red + 0.715158 * pixel.green + 0.072186 * pixel.blue; return (QuantumScale * luma); } if (LocaleCompare(symbol, "luminance") == 0) { double luminence; luminence = 0.212656 * pixel.red + 0.715158 * pixel.green + 0.072186 * pixel.blue; return (QuantumScale * luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol, "maxima", 6) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "mean", 4) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "median", 6) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "minima", 6) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleCompare(symbol, "m") == 0) return (QuantumScale * pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol, "n") == 0) return ((double)GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol, "o") == 0) return (QuantumScale * pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol, "page.height") == 0) return ((double)image->page.height); if (LocaleCompare(symbol, "page.width") == 0) return 
((double)image->page.width); if (LocaleCompare(symbol, "page.x") == 0) return ((double)image->page.x); if (LocaleCompare(symbol, "page.y") == 0) return ((double)image->page.y); if (LocaleCompare(symbol, "printsize.x") == 0) return (PerceptibleReciprocal(image->resolution.x) * image->columns); if (LocaleCompare(symbol, "printsize.y") == 0) return (PerceptibleReciprocal(image->resolution.y) * image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol, "quality") == 0) return ((double)image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol, "resolution.x") == 0) return (image->resolution.x); if (LocaleCompare(symbol, "resolution.y") == 0) return (image->resolution.y); if (LocaleCompare(symbol, "r") == 0) return (QuantumScale * pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol, "saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red, pixel.green, pixel.blue, &hue, &saturation, &lightness); return (saturation); } if (LocaleNCompare(symbol, "skewness", 8) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "standard_deviation", 18) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol, "t") == 0) return ((double)GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol, "w") == 0) return ((double)image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol, "y") == 0) return (QuantumScale * pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol, "z") == 0) return ((double)GetImageDepth(image, fx_info->exception)); break; } default: break; } value = GetFxSymbolValue(fx_info, symbol); if (value != (const double *)NULL) return (*value); (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UndefinedVariable", "`%s'", symbol); (void)SetFxSymbolValue(fx_info, symbol, 0.0); return (0.0); } static const char * FxOperatorPrecedence(const char *expression, ExceptionInfo * exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c = (-1); level = 0; subexpression = (const char *)NULL; target = NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence = UndefinedPrecedence; if ((isspace((int)((unsigned char)*expression)) != 0) || (c == (int)'@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression, "acosh", 5) != MagickFalse) { expression += 5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression, "asinh", 5) != MagickFalse) { expression += 5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression, "atanh", 5) != MagickFalse) { expression += 5; break; } #endif if (IsFxFunction(expression, "atan2", 5) != MagickFalse) { expression += 5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression, "E+", 2) == 0) || (LocaleNCompare(expression, "E-", 2) == 0))) { expression += 2; /* 
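E+ or E- after a digit is the exponent of a numeric literal such as
3.0E-2, so both characters are skipped here rather than parsing the
sign as an operator: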
scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression, "j0", 2) != MagickFalse) || (IsFxFunction(expression, "j1", 2) != MagickFalse)) { expression += 2; break; } break; } case '#': { while (isxdigit((int)((unsigned char)*(expression + 1))) != 0) expression++; break; } default: break; } if ((c == (int)'{') || (c == (int)'[')) level++; else if ((c == (int)'}') || (c == (int)']')) level--; if (level == 0) switch ((unsigned char)*expression) { case '~': case '!': { precedence = BitwiseComplementPrecedence; break; } case '^': case '@': { precedence = ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")", c) != (char *)NULL))) && (((islower((int)((unsigned char)*expression)) != 0) || (strchr("(", (int)((unsigned char)*expression)) != (char *)NULL)) || ((isdigit(c) == 0) && (isdigit((int)((unsigned char)*expression)) != 0))) && (strchr("xy", (int)((unsigned char)*expression)) == (char *)NULL)) precedence = MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence = MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,", c) == (char *)NULL) || (isalpha(c) != 0)) precedence = AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence = AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence = ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence = RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence = EquivalencyPrecedence; break; } case '&': { precedence = BitwiseAndPrecedence; break; } case '|': { precedence = BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence = LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence = LogicalOrPrecedence; break; } case ExponentialNotation: { precedence = ExponentialNotationPrecedence; break; } case ':': case '?': { precedence = TernaryPrecedence; break; } case '=': { precedence = AssignmentPrecedence; break; } case ',': { precedence = CommaPrecedence; break; } case ';': { precedence = SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* * Right-to-left associativity. */ target = precedence; subexpression = expression; } } else if (precedence >= target) { /* * Left-to-right associativity. 
*/
        target = precedence;
        subexpression = expression;
      }
    if (strchr("(", (int)*expression) != (char *)NULL)
      expression = FxSubexpression(expression, exception);
    c = (int)(*expression++);
  }
  return (subexpression);
}

static double FxEvaluateSubexpression(FxInfo * fx_info,
  const PixelChannel channel, const ssize_t x, const ssize_t y,
  const char *expression, const size_t depth, double *beta,
  ExceptionInfo * exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
  subexpression=DestroyString(subexpression); \
  return(value); \
}
#define FxParseConditional(subexpression,sentinel,p,q) \
{ \
  p=subexpression; \
  for (q=(char *) p; (*q != (sentinel)) && (*q != '\0'); q++) \
    if (*q == '(') \
      { \
        for (q++; (*q != ')') && (*q != '\0'); q++); \
        if (*q == '\0') \
          break; \
      } \
  if (*q == '\0') \
    { \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        OptionError,"UnableToParseExpression","`%s'",subexpression); \
      FxReturn(0.0); \
    } \
  if (strlen(q) == 1) \
    *(q+1)='\0'; \
  *q='\0'; \
}

  char *q, *subexpression;
  double alpha, gamma, sans, value;
  register const char *p;

  *beta = 0.0;
  sans = 0.0;
  subexpression = AcquireString(expression);
  *subexpression = '\0';
  if (depth > FxMaxSubexpressionDepth) {
    (void)ThrowMagickException(exception, GetMagickModule(), OptionError,
      "UnableToParseExpression", "`%s'", expression);
    FxReturn(0.0);
  }
  if (exception->severity >= ErrorException)
    FxReturn(0.0);
  while (isspace((int)((unsigned char)*expression)) != 0)
    expression++;
  if (*expression == '\0')
    FxReturn(0.0);
  p = FxOperatorPrecedence(expression, exception);
  if (p != (const char *)NULL) {
    (void)CopyMagickString(subexpression, expression, (size_t) (p - expression + 1));
    alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth + 1, beta, exception);
    switch ((unsigned char)*p) {
    case '~': {
      *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception);
      *beta = (double)(~(size_t) *beta);
      FxReturn(*beta);
    }
    case '!': {
      *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception);
      FxReturn(*beta == 0.0 ?
1.0 : 0.0); } case '^': { *beta = pow(alpha, FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha * (*beta)); } case '/': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(PerceptibleReciprocal(*beta) * alpha); } case '%': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(fmod(alpha, *beta)); } case '+': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha + (*beta)); } case '-': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha - (*beta)); } case BitwiseAndAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = (double)((size_t) (alpha + 0.5) & (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = (double)((size_t) (alpha + 0.5) | (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (*beta + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } value = (double)((size_t) (alpha + 0.5) << (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (*beta + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } value = (double)((size_t) (alpha + 0.5) >> (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if 
(*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = pow(alpha, *beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = fmod(alpha, *beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha + (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha - (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha * (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha * PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha + 1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info, p, value) == MagickFalse) return (0.0); } else if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha - 1.0; if (*subexpression == 
'\0') { if (SetFxSymbolValue(fx_info, p, value) == MagickFalse) return (0.0); } else if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (gamma + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } *beta = (double)((size_t) (alpha + 0.5) << (size_t) (gamma + 0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (gamma + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } *beta = (double)((size_t) (alpha + 0.5) >> (size_t) (gamma + 0.5)); FxReturn(*beta); } case '<': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(fabs(alpha - (*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(fabs(alpha - (*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); *beta = (double)((size_t) (alpha + 0.5) & (size_t) (gamma + 0.5)); FxReturn(*beta); } case '|': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); *beta = (double)((size_t) (alpha + 0.5) | (size_t) (gamma + 0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta = 0.0; FxReturn(*beta); } gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception); *beta = (gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta = 1.0; FxReturn(*beta); } gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception); *beta = (gamma > 0.0) ? 
1.0 : 0.0;
      FxReturn(*beta);
    }
    case '?': {
      (void)CopyMagickString(subexpression, ++p, MagickPathExtent - 1);
      FxParseConditional(subexpression, ':', p, q);
      if (fabs(alpha) >= MagickEpsilon)
        gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception);
      else
        gamma = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception);
      FxReturn(gamma);
    }
    case '=': {
      q = subexpression;
      while (isalpha((int)((unsigned char)*q)) != 0)
        q++;
      if (*q != '\0') {
        (void)ThrowMagickException(exception, GetMagickModule(), OptionError,
          "UnableToParseExpression", "`%s'", subexpression);
        FxReturn(0.0);
      }
      ClearMagickException(exception);
      *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception);
      value = (*beta);
      if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse)
        return (0.0);
      FxReturn(*beta);
    }
    case ',': {
      *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception);
      FxReturn(alpha);
    }
    case ';': {
      *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception);
      FxReturn(*beta);
    }
    default: {
      gamma = alpha * FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception);
      FxReturn(gamma);
    }
    }
  }
  if (strchr("(", (int)*expression) != (char *)NULL) {
    size_t length;

    if (depth >= FxMaxParenthesisDepth)
      (void)ThrowMagickException(exception, GetMagickModule(), OptionError,
        "ParenthesisNestedTooDeeply", "`%s'", expression);
    length = CopyMagickString(subexpression, expression + 1, MagickPathExtent);
    if (length != 0)
      subexpression[length - 1] = '\0';
    gamma = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth + 1, beta, exception);
    FxReturn(gamma);
  }
  switch (*expression) {
  case '+': {
    gamma = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 1, depth + 1, beta, exception);
    FxReturn(1.0 * gamma);
  }
  case '-': {
    gamma = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 1, depth + 1, beta, exception);
    FxReturn(-1.0 * gamma);
  }
  case '~': {
    gamma = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 1, depth + 1, beta, exception);
    FxReturn((double)(~(size_t) (gamma + 0.5)));
  }
  case 'A': case 'a': {
    if (IsFxFunction(expression, "abs", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(fabs(alpha));
    }
#if defined(MAGICKCORE_HAVE_ACOSH)
    if (IsFxFunction(expression, "acosh", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(acosh(alpha));
    }
#endif
    if (IsFxFunction(expression, "acos", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      FxReturn(acos(alpha));
    }
#if defined(MAGICKCORE_HAVE_J1)
    if (IsFxFunction(expression, "airy", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      if (alpha == 0.0)
        FxReturn(1.0);
      gamma = 2.0 * j1((MagickPI * alpha)) / (MagickPI * alpha);
      FxReturn(gamma * gamma);
    }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
    if (IsFxFunction(expression, "asinh", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(asinh(alpha));
    }
#endif
    if (IsFxFunction(expression, "asin", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      FxReturn(asin(alpha));
    }
    if (IsFxFunction(expression, "alt", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
    }
    if (IsFxFunction(expression, "atan2", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(atan2(alpha, *beta));
    }
#if defined(MAGICKCORE_HAVE_ATANH)
    if (IsFxFunction(expression, "atanh", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(atanh(alpha));
    }
#endif
    if (IsFxFunction(expression, "atan", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      FxReturn(atan(alpha));
    }
    if (LocaleCompare(expression, "a") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'B': case 'b': {
    if (LocaleCompare(expression, "b") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'C': case 'c': {
    if (IsFxFunction(expression, "ceil", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      FxReturn(ceil(alpha));
    }
    if (IsFxFunction(expression, "clamp", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      if (alpha < 0.0)
        FxReturn(0.0);
      if (alpha > 1.0)
        FxReturn(1.0);
      FxReturn(alpha);
    }
    if (IsFxFunction(expression, "cosh", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      FxReturn(cosh(alpha));
    }
    if (IsFxFunction(expression, "cos", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(cos(alpha));
    }
    if (LocaleCompare(expression, "c") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'D': case 'd': {
    if (IsFxFunction(expression, "debug", 5) != MagickFalse) {
      const char *type;
      size_t length;

      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      switch (fx_info->images->colorspace) {
      case CMYKColorspace: {
        switch (channel) {
        case CyanPixelChannel: type = "cyan"; break;
        case MagentaPixelChannel: type = "magenta"; break;
        case YellowPixelChannel: type = "yellow"; break;
        case AlphaPixelChannel: type = "alpha"; break;
        case BlackPixelChannel: type = "black"; break;
        default: type = "unknown"; break;
        }
        break;
      }
      case GRAYColorspace: {
        switch (channel) {
        case RedPixelChannel: type = "gray"; break;
        case AlphaPixelChannel: type = "alpha"; break;
        default: type = "unknown"; break;
        }
        break;
      }
      default: {
        switch (channel) {
        case RedPixelChannel: type = "red"; break;
        case GreenPixelChannel: type = "green"; break;
        case BluePixelChannel: type = "blue"; break;
        case AlphaPixelChannel: type = "alpha"; break;
        default: type = "unknown"; break;
        }
        break;
      }
      }
      *subexpression = '\0';
      length = 1;
      if (strlen(expression) > 6)
        length = CopyMagickString(subexpression, expression + 6, MagickPathExtent);
      if (length != 0)
        subexpression[length - 1] = '\0';
      if (fx_info->file != (FILE *) NULL)
        (void)FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: " "%s=%.*g\n",
          fx_info->images->filename, (double)x, (double)y, type, subexpression,
          GetMagickPrecision(), alpha);
      FxReturn(alpha);
    }
    if (IsFxFunction(expression, "do", 2) != MagickFalse) {
      size_t length;

      /*
        Parse do(expression,condition test).
      */
      length = CopyMagickString(subexpression, expression + 3, MagickPathExtent - 1);
      if (length != 0)
        subexpression[length - 1] = '\0';
      FxParseConditional(subexpression, ',', p, q);
      for (alpha = 0.0;;) {
        alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception);
        gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception);
        if (fabs(gamma) < MagickEpsilon)
          break;
      }
      FxReturn(alpha);
    }
    if (IsFxFunction(expression, "drc", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn((alpha / (*beta * (alpha - 1.0) + 1.0)));
    }
    break;
  }
  case 'E': case 'e': {
    if (LocaleCompare(expression, "epsilon") == 0)
      FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
    if (IsFxFunction(expression, "erf", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(erf(alpha));
    }
#endif
    if (IsFxFunction(expression, "exp", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(exp(alpha));
    }
    if (LocaleCompare(expression, "e") == 0)
      FxReturn(2.7182818284590452354);
    break;
  }
  case 'F': case 'f': {
    if (IsFxFunction(expression, "floor", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(floor(alpha));
    }
    if (IsFxFunction(expression, "for", 3) != MagickFalse) {
      double sans = 0.0;
      size_t length;

      /*
        Parse for(initialization, condition test, expression).
      */
      length = CopyMagickString(subexpression, expression + 4, MagickPathExtent - 1);
      if (length != 0)
        subexpression[length - 1] = '\0';
      FxParseConditional(subexpression, ',', p, q);
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception);
      (void)CopyMagickString(subexpression, q + 1, MagickPathExtent - 1);
      FxParseConditional(subexpression, ',', p, q);
      for (alpha = 0.0;;) {
        gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception);
        if (fabs(gamma) < MagickEpsilon)
          break;
        alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception);
      }
      FxReturn(alpha);
    }
    break;
  }
  case 'G': case 'g': {
    if (IsFxFunction(expression, "gauss", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(exp((-alpha * alpha / 2.0)) / sqrt(2.0 * MagickPI));
    }
    if (IsFxFunction(expression, "gcd", 3) != MagickFalse) {
      MagickOffsetType gcd;

      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      gcd = FxGCD((MagickOffsetType) (alpha + 0.5), (MagickOffsetType) (*beta + 0.5));
      FxReturn((double)gcd);
    }
    if (LocaleCompare(expression, "g") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'H': case 'h': {
    if (LocaleCompare(expression, "h") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    if (LocaleCompare(expression, "hue") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    if (IsFxFunction(expression, "hypot", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn(hypot(alpha, *beta));
    }
    break;
  }
  case 'K': case 'k': {
    if (LocaleCompare(expression, "k") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'I': case 'i': {
    if (IsFxFunction(expression, "if", 2) != MagickFalse) {
      double sans = 0.0;
      size_t length;

      length = CopyMagickString(subexpression, expression + 3, MagickPathExtent - 1);
      if (length != 0)
        subexpression[length - 1] = '\0';
      FxParseConditional(subexpression, ',', p, q);
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception);
      (void)CopyMagickString(subexpression, q + 1, MagickPathExtent - 1);
      FxParseConditional(subexpression, ',', p, q);
      if (fabs(alpha) >= MagickEpsilon)
        alpha = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception);
      else
        alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception);
      FxReturn(alpha);
    }
    if (LocaleCompare(expression, "intensity") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    if (IsFxFunction(expression, "int", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(floor(alpha));
    }
    if (IsFxFunction(expression, "isnan", 5) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception);
      FxReturn((double)!!IsNaN(alpha));
    }
    if (LocaleCompare(expression, "i") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'J': case 'j': {
    if (LocaleCompare(expression, "j") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
#if defined(MAGICKCORE_HAVE_J0)
    if (IsFxFunction(expression, "j0", 2) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 2, depth + 1, beta, exception);
      FxReturn(j0(alpha));
    }
#endif
#if defined(MAGICKCORE_HAVE_J1)
    if (IsFxFunction(expression, "j1", 2) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 2, depth + 1, beta, exception);
      FxReturn(j1(alpha));
    }
#endif
#if defined(MAGICKCORE_HAVE_J1)
    if (IsFxFunction(expression, "jinc", 4) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception);
      if (alpha == 0.0)
        FxReturn(1.0);
      FxReturn((2.0 * j1((MagickPI * alpha)) / (MagickPI * alpha)));
    }
#endif
    break;
  }
  case 'L': case 'l': {
    if (IsFxFunction(expression, "ln", 2) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 2, depth + 1, beta, exception);
      FxReturn(log(alpha));
    }
    if (IsFxFunction(expression, "logtwo", 6) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 6, depth + 1, beta, exception);
      FxReturn(log10(alpha) / log10(2.0));
    }
    if (IsFxFunction(expression, "log", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(log10(alpha));
    }
    if (LocaleCompare(expression, "lightness") == 0)
      FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception));
    break;
  }
  case 'M': case 'm': {
    if (LocaleCompare(expression, "MaxRGB") == 0)
      FxReturn(QuantumRange);
    if (LocaleNCompare(expression, "maxima", 6) == 0)
      break;
    if (IsFxFunction(expression, "max", 3) != MagickFalse) {
      alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception);
      FxReturn(alpha > *beta ?
alpha : *beta); } if (LocaleNCompare(expression, "minima", 6) == 0) break; if (IsFxFunction(expression, "min", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(alpha < *beta ? alpha : *beta); } if (IsFxFunction(expression, "mod", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(alpha - floor((alpha * PerceptibleReciprocal(*beta))) * (*beta)); } if (LocaleCompare(expression, "m") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression, "not", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn((double)(alpha < MagickEpsilon)); } if (LocaleCompare(expression, "n") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression, "Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression, "o") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression, "phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression, "pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression, "pow", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(pow(alpha, *beta)); } if (LocaleCompare(expression, "p") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression, "QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression, "QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression, "rand", 4) != MagickFalse) { alpha = GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression, "round", 5) != MagickFalse) { /* * Round the fraction to nearest integer. */ alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); if ((alpha - floor(alpha)) < (ceil(alpha) - alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression, "r") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'S': case 's': { if (LocaleCompare(expression, "saturation") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); if (IsFxFunction(expression, "sign", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (IsFxFunction(expression, "sinc", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI * alpha)) / (MagickPI * alpha)); } if (IsFxFunction(expression, "sinh", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression, "sin", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression, "sqrt", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression, "squish", 6) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 6, depth + 1, beta, exception); FxReturn((1.0 / (1.0 + exp(-alpha)))); } if (LocaleCompare(expression, "s") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'T': case 't': { if (IsFxFunction(expression, "tanh", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression, "tan", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression, "Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression, "trunc", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression, "t") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression, "u") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression, "v") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression, "while", 5) != MagickFalse) { size_t length; /* * Parse while(condition test, expression). 
*/ length = CopyMagickString(subexpression, expression + 6, MagickPathExtent - 1); if (length != 0) subexpression[length - 1] = '\0'; FxParseConditional(subexpression, ',', p, q); for (alpha = 0.0;;) { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception); } FxReturn(alpha); } if (LocaleCompare(expression, "w") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression, "y") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression, "z") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } default: break; } subexpression = DestroyString(subexpression); q = (char *)expression; alpha = InterpretSiPrefixValue(expression, &q); if (q == expression) alpha = FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo * fx_info, double *alpha, ExceptionInfo * exception) { MagickBooleanType status; status = FxEvaluateChannelExpression(fx_info, GrayPixelChannel, 0, 0, alpha, exception); return (status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo * fx_info, double *alpha, ExceptionInfo * exception) { FILE * file; MagickBooleanType status; file = fx_info->file; fx_info->file = (FILE *) NULL; status = FxEvaluateChannelExpression(fx_info, GrayPixelChannel, 0, 0, alpha, exception); fx_info->file = file; return (status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo * fx_info, const PixelChannel channel, const ssize_t x, const ssize_t y, double *alpha, ExceptionInfo * exception) { double beta; beta = 0.0; *alpha = FxEvaluateSubexpression(fx_info, channel, x, y, fx_info->expression, 0, &beta, exception); return (exception->severity == OptionError ? MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % F x I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FxImage() applies a mathematical expression to the specified * image. % % The format of the FxImage method is: % % Image * *FxImage(const Image *image,const char *expression, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o expression: A mathematical expression. % % o * exception: return any errors or warnings in this structure. 
% */ static FxInfo ** DestroyFxThreadSet(FxInfo ** fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i = 0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i] = DestroyFxInfo(fx_info[i]); fx_info = (FxInfo **) RelinquishMagickMemory(fx_info); return (fx_info); } static FxInfo ** AcquireFxThreadSet(const Image * image, const char *expression, ExceptionInfo * exception) { char *fx_expression; double alpha; FxInfo ** fx_info; register ssize_t i; size_t number_threads; number_threads = (size_t) GetMagickResourceLimit(ThreadResource); fx_info = (FxInfo **) AcquireQuantumMemory(number_threads, sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return ((FxInfo **) NULL); } (void)memset(fx_info, 0, number_threads * sizeof(*fx_info)); if (*expression != '@') fx_expression = ConstantString(expression); else fx_expression = FileToString(expression + 1, ~0UL, exception); for (i = 0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i] = AcquireFxInfo(image, fx_expression, exception); if (fx_info[i] == (FxInfo *) NULL) break; status = FxPreprocessExpression(fx_info[i], &alpha, exception); if (status == MagickFalse) break; } fx_expression = DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info = DestroyFxThreadSet(fx_info); return (fx_info); } MagickExport Image * FxImage(const Image * image, const char *expression, ExceptionInfo * exception) { #define FxImageTag "Fx/Image" CacheView * fx_view, *image_view; FxInfo ** magick_restrict fx_info; Image * fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (expression == (const char *)NULL) return (CloneImage(image, 0, 0, MagickTrue, exception)); fx_info = AcquireFxThreadSet(image, expression, exception); if (fx_info == (FxInfo **) NULL) return ((Image *) NULL); fx_image = CloneImage(image, 0, 0, MagickTrue, exception); if (fx_image == (Image *) NULL) { fx_info = DestroyFxThreadSet(fx_info); return ((Image *) NULL); } if (SetImageStorageClass(fx_image, DirectClass, exception) == MagickFalse) { fx_info = DestroyFxThreadSet(fx_info); fx_image = DestroyImage(fx_image); return ((Image *) NULL); } /* * Fx image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); fx_view = AcquireAuthenticCacheView(fx_image, exception); for (y = 0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); q = QueueCacheViewAuthenticPixels(fx_view, 0, y, fx_image->columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait fx_traits = GetPixelChannelTraits(fx_image, channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image, channel, p[i], q); continue; } alpha = 0.0; (void)FxEvaluateChannelExpression(fx_info[id], channel, x, y, &alpha, exception); q[i] = ClampToQuantum(QuantumRange * alpha); } p += GetPixelChannels(image); q += GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, FxImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } fx_view = DestroyCacheView(fx_view); image_view = DestroyCacheView(image_view); fx_info = DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image = DestroyImage(fx_image); return (fx_image); }
/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"

/*
  Typedef declarations.
*/
typedef enum {
  BitwiseAndAssignmentOperator = 0xd9U,
  BitwiseOrAssignmentOperator,
  LeftShiftAssignmentOperator,
  RightShiftAssignmentOperator,
  PowerAssignmentOperator,
  ModuloAssignmentOperator,
  PlusAssignmentOperator,
  SubtractAssignmentOperator,
  MultiplyAssignmentOperator,
  DivideAssignmentOperator,
  IncrementAssignmentOperator,
  DecrementAssignmentOperator,
  LeftShiftOperator,
  RightShiftOperator,
  LessThanEqualOperator,
  GreaterThanEqualOperator,
  EqualOperator,
  NotEqualOperator,
  LogicalAndOperator,
  LogicalOrOperator,
  ExponentialNotation
} FxOperator;

struct _FxInfo {
  const Image * images;
  char *expression;
  FILE * file;
  SplayTreeInfo * colors, *symbols;
  CacheView ** view;
  RandomInfo * random_info;
  ExceptionInfo * exception;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e F x I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireFxInfo() allocates the FxInfo structure.
%
%  The format of the AcquireFxInfo method is:
%
%      FxInfo *AcquireFxInfo(Image *images,const char *expression,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o expression: the expression.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickPrivate FxInfo * AcquireFxInfo(const Image * images, const char *expression, ExceptionInfo * exception) { const Image * next; FxInfo * fx_info; register ssize_t i; unsigned char fx_op[2]; fx_info = (FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void)memset(fx_info, 0, sizeof(*fx_info)); fx_info->exception = AcquireExceptionInfo(); fx_info->images = images; fx_info->colors = NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols = NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view = (CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images), sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); i = 0; next = GetFirstImageInList(fx_info->images); for (; next != (Image *) NULL; next = next->next) { fx_info->view[i] = AcquireVirtualCacheView(next, exception); i++; } fx_info->random_info = AcquireRandomInfo(); fx_info->expression = ConstantString(expression); fx_info->file = stderr; /* * Convert compound to simple operators. */ fx_op[1] = '\0'; *fx_op = (unsigned char)BitwiseAndAssignmentOperator; (void)SubstituteString(&fx_info->expression, "&=", (char *)fx_op); *fx_op = (unsigned char)BitwiseOrAssignmentOperator; (void)SubstituteString(&fx_info->expression, "|=", (char *)fx_op); *fx_op = (unsigned char)LeftShiftAssignmentOperator; (void)SubstituteString(&fx_info->expression, "<<=", (char *)fx_op); *fx_op = (unsigned char)RightShiftAssignmentOperator; (void)SubstituteString(&fx_info->expression, ">>=", (char *)fx_op); *fx_op = (unsigned char)PowerAssignmentOperator; (void)SubstituteString(&fx_info->expression, "^=", (char *)fx_op); *fx_op = (unsigned char)ModuloAssignmentOperator; (void)SubstituteString(&fx_info->expression, "%=", (char *)fx_op); *fx_op = (unsigned char)PlusAssignmentOperator; (void)SubstituteString(&fx_info->expression, "+=", (char *)fx_op); *fx_op = (unsigned char)SubtractAssignmentOperator; (void)SubstituteString(&fx_info->expression, "-=", (char *)fx_op); *fx_op = (unsigned char)MultiplyAssignmentOperator; (void)SubstituteString(&fx_info->expression, "*=", (char *)fx_op); *fx_op = (unsigned char)DivideAssignmentOperator; (void)SubstituteString(&fx_info->expression, "/=", (char *)fx_op); *fx_op = (unsigned char)IncrementAssignmentOperator; (void)SubstituteString(&fx_info->expression, "++", (char *)fx_op); *fx_op = (unsigned char)DecrementAssignmentOperator; (void)SubstituteString(&fx_info->expression, "--", (char *)fx_op); *fx_op = (unsigned char)LeftShiftOperator; (void)SubstituteString(&fx_info->expression, "<<", (char *)fx_op); *fx_op = (unsigned char)RightShiftOperator; (void)SubstituteString(&fx_info->expression, ">>", (char *)fx_op); *fx_op = (unsigned char)LessThanEqualOperator; (void)SubstituteString(&fx_info->expression, "<=", (char *)fx_op); *fx_op = (unsigned char)GreaterThanEqualOperator; (void)SubstituteString(&fx_info->expression, ">=", (char *)fx_op); *fx_op = (unsigned char)EqualOperator; (void)SubstituteString(&fx_info->expression, "==", (char *)fx_op); *fx_op = (unsigned char)NotEqualOperator; (void)SubstituteString(&fx_info->expression, "!=", (char *)fx_op); *fx_op = (unsigned char)LogicalAndOperator; (void)SubstituteString(&fx_info->expression, "&&", (char *)fx_op); *fx_op = (unsigned char)LogicalOrOperator; (void)SubstituteString(&fx_info->expression, "||", (char *)fx_op); *fx_op = (unsigned char)ExponentialNotation; 
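  /*
    Added comment: every multi-character operator in the user expression is
    rewritten in place as a single byte drawn from the FxOperator enum (0xd9
    and up, outside the printable ASCII range), so the character-at-a-time
    scanners in FxOperatorPrecedence() and FxEvaluateSubexpression() can
    treat each operator as one token; the SubstituteString() call below maps
    "**" to the ExponentialNotation byte.
  */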
(void)SubstituteString(&fx_info->expression, "**", (char *)fx_op); /* * Force right-to-left associativity for unary negation. */ (void)SubstituteString(&fx_info->expression, "-", "-1.0*"); (void)SubstituteString(&fx_info->expression, "^-1.0*", "^-"); (void)SubstituteString(&fx_info->expression, "E-1.0*", "E-"); (void)SubstituteString(&fx_info->expression, "e-1.0*", "e-"); (void)SubstituteString(&fx_info->expression, " ", ""); /* compact string */ return (fx_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y F x I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo * structure. % % The format of the DestroyFxInfo method is: % % * ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each * parameter follows: % % o fx_info: the fx info. % */ MagickPrivate FxInfo * DestroyFxInfo(FxInfo * fx_info) { register ssize_t i; fx_info->exception = DestroyExceptionInfo(fx_info->exception); fx_info->expression = DestroyString(fx_info->expression); fx_info->symbols = DestroySplayTree(fx_info->symbols); fx_info->colors = DestroySplayTree(fx_info->colors); for (i = (ssize_t) GetImageListLength(fx_info->images) - 1; i >= 0; i--) fx_info->view[i] = DestroyCacheView(fx_info->view[i]); fx_info->view = (CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info = DestroyRandomInfo(fx_info->random_info); fx_info = (FxInfo *) RelinquishMagickMemory(fx_info); return (fx_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + F x E v a l u a t e C h a n n e l E x p r e s s i o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % FxEvaluateChannelExpression() evaluates an expression and * returns the % results. % % The format of the FxEvaluateExpression method * is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % * const PixelChannel channel,const ssize_t x,const ssize_t y, % * double *alpha,Exceptioninfo *exception) % double * FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo * *exception) % % A description of each parameter follows: % % o * fx_info: the fx info. % % o channel: the channel. % % o x,y: the * pixel position. % % o alpha: the result. % % o exception: return any * errors or warnings in this structure. 
% */ static inline const double * GetFxSymbolValue(FxInfo * magick_restrict fx_info, const char *symbol) { return ((const double *)GetValueFromSplayTree(fx_info->symbols, symbol)); } static inline MagickBooleanType SetFxSymbolValue( FxInfo * magick_restrict fx_info, const char *magick_restrict symbol, double const value) { double *object; object = (double *)GetValueFromSplayTree(fx_info->symbols, symbol); if (object != (double *)NULL) { *object = value; return (MagickTrue); } object = (double *)AcquireQuantumMemory(1, sizeof(*object)); if (object == (double *)NULL) { (void)ThrowMagickException(fx_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", fx_info->images->filename); return (MagickFalse); } *object = value; return (AddValueToSplayTree(fx_info->symbols, ConstantString(symbol), object)); } static double FxChannelStatistics(FxInfo * fx_info, Image * image, PixelChannel channel, const char *symbol, ExceptionInfo * exception) { ChannelType channel_mask; char key[MagickPathExtent]; const double *value; double statistic; register const char *p; channel_mask = UndefinedChannel; for (p = symbol; (*p != '.') && (*p != '\0'); p++); if (*p == '.') { ssize_t option; option = ParseCommandOption(MagickPixelChannelOptions, MagickTrue, p + 1); if (option >= 0) { channel = (PixelChannel) option; channel_mask = SetPixelChannelMask(image, (ChannelType) (1UL << channel)); } } (void)FormatLocaleString(key, MagickPathExtent, "%p.%.20g.%s", (void *)image, (double)channel, symbol); value = GetFxSymbolValue(fx_info, key); if (value != (const double *)NULL) { if (channel_mask != UndefinedChannel) (void)SetPixelChannelMask(image, channel_mask); return (QuantumScale * (*value)); } statistic = 0.0; if (LocaleNCompare(symbol, "depth", 5) == 0) { size_t depth; depth = GetImageDepth(image, exception); statistic = (double)depth; } if (LocaleNCompare(symbol, "kurtosis", 8) == 0) { double kurtosis, skewness; (void)GetImageKurtosis(image, &kurtosis, &skewness, exception); statistic = kurtosis; } if (LocaleNCompare(symbol, "maxima", 6) == 0) { double maxima, minima; (void)GetImageRange(image, &minima, &maxima, exception); statistic = maxima; } if (LocaleNCompare(symbol, "mean", 4) == 0) { double mean, standard_deviation; (void)GetImageMean(image, &mean, &standard_deviation, exception); statistic = mean; } if (LocaleNCompare(symbol, "median", 6) == 0) { double median; (void)GetImageMedian(image, &median, exception); statistic = median; } if (LocaleNCompare(symbol, "minima", 6) == 0) { double maxima, minima; (void)GetImageRange(image, &minima, &maxima, exception); statistic = minima; } if (LocaleNCompare(symbol, "skewness", 8) == 0) { double kurtosis, skewness; (void)GetImageKurtosis(image, &kurtosis, &skewness, exception); statistic = skewness; } if (LocaleNCompare(symbol, "standard_deviation", 18) == 0) { double mean, standard_deviation; (void)GetImageMean(image, &mean, &standard_deviation, exception); statistic = standard_deviation; } if (channel_mask != UndefinedChannel) (void)SetPixelChannelMask(image, channel_mask); if (SetFxSymbolValue(fx_info, key, statistic) == MagickFalse) return (0.0); return (QuantumScale * statistic); } static double FxEvaluateSubexpression(FxInfo *, const PixelChannel, const ssize_t, const ssize_t, const char *, const size_t, double *, ExceptionInfo *); static inline MagickBooleanType IsFxFunction(const char *expression, const char *name, const size_t length) { int c; register size_t i; for (i = 0; i <= length; i++) if (expression[i] == '\0') return 
(MagickFalse); c = expression[length]; if ((LocaleNCompare(expression, name, length) == 0) && ((isspace(c) == 0) || (c == '('))) return (MagickTrue); return (MagickFalse); } static MagickOffsetType FxGCD(MagickOffsetType alpha, MagickOffsetType beta) { if (beta != 0) return (FxGCD(beta, alpha % beta)); return (alpha); } static inline const char * FxSubexpression(const char *expression, ExceptionInfo * exception) { const char *subexpression; register ssize_t level; level = 0; subexpression = expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")", (int)*subexpression) == (char *)NULL))) { if (strchr("(", (int)*subexpression) != (char *)NULL) level++; else if (strchr(")", (int)*subexpression) != (char *)NULL) level--; subexpression++; } if (*subexpression == '\0') (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnbalancedParenthesis", "`%s'", expression); return (subexpression); } static double FxGetSymbol(FxInfo * fx_info, const PixelChannel channel, const ssize_t x, const ssize_t y, const char *expression, const size_t depth, ExceptionInfo * exception) { char *q, symbol[MagickPathExtent]; const char *p; const double *value; double alpha, beta; Image * image; MagickBooleanType status; PixelInfo pixel; PointInfo point; register ssize_t i; size_t level; p = expression; i = GetImageIndexInList(fx_info->images); level = 0; point.x = (double)x; point.y = (double)y; if (isalpha((int)((unsigned char)*(p + 1))) == 0) { char *subexpression; subexpression = AcquireString(expression); if (strchr("suv", (int)*p) != (char *)NULL) { switch (*p) { case 's': default: { i = GetImageIndexInList(fx_info->images); break; } case 'u': i = 0; break; case 'v': i = 1; break; } p++; if (*p == '[') { level++; q = subexpression; for (p++; *p != '\0';) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++ = (*p++); } *q = '\0'; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth, &beta, exception); i = (ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int)((unsigned char)*(p + 1))) == 0)) { p++; if (*p == '{') { level++; q = subexpression; for (p++; *p != '\0';) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++ = (*p++); } *q = '\0'; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth, &beta, exception); point.x = alpha; point.y = beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q = subexpression; for (p++; *p != '\0';) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++ = (*p++); } *q = '\0'; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth, &beta, exception); point.x += alpha; point.y += beta; if (*p != '\0') p++; } if (*p == '.') p++; } subexpression = DestroyString(subexpression); } image = GetImageFromList(fx_info->images, i); if (image == (Image *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "NoSuchImage", "`%s'", expression); return (0.0); } i = GetImageIndexInList(image); GetPixelInfo(image, &pixel); status = InterpolatePixelInfo(image, fx_info->view[i], image->interpolate, point.x, point.y, &pixel, exception); (void)status; if ((*p != '\0') && (*(p + 1) != '\0') && (*(p + 2) != '\0') && (LocaleCompare(p, "intensity") != 0) && (LocaleCompare(p, "luma") != 0) && (LocaleCompare(p, "luminance") != 0) && (LocaleCompare(p, "hue") != 0) && (LocaleCompare(p, "saturation") != 0) && (LocaleCompare(p, "lightness") 
!= 0)) { char name[MagickPathExtent]; size_t length; (void)CopyMagickString(name, p, MagickPathExtent); length = strlen(name); for (q = name + length - 1; q > name; q--) { if (*q == ')') break; if (*q == '.') { *q = '\0'; break; } } q = name; if ((*q != '\0') && (*(q + 1) != '\0') && (*(q + 2) != '\0') && (GetFxSymbolValue(fx_info, name) == (const double *)NULL)) { PixelInfo * color; color = (PixelInfo *) GetValueFromSplayTree(fx_info->colors, name); if (color != (PixelInfo *) NULL) { pixel = (*color); p += length; } else { MagickBooleanType status; status = QueryColorCompliance(name, AllCompliance, &pixel, fx_info->exception); if (status != MagickFalse) { (void)AddValueToSplayTree(fx_info->colors, ConstantString(name), ClonePixelInfo(&pixel)); p += length; } } } } (void)CopyMagickString(symbol, p, MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return (QuantumScale * pixel.red); case GreenPixelChannel: return (QuantumScale * pixel.green); case BluePixelChannel: return (QuantumScale * pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void)ThrowMagickException(exception, GetMagickModule(), ImageError, "ColorSeparatedImageRequired", "`%s'", image->filename); return (0.0); } return (QuantumScale * pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return (1.0); alpha = (double)(QuantumScale * pixel.alpha); return (alpha); } case CompositePixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image, &pixel, quantum_pixel); return (QuantumScale * GetPixelIntensity(image, quantum_pixel)); } case IndexPixelChannel: return (0.0); default: break; } (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", p); return (0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol, "a") == 0) return ((QuantumScale * pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol, "b") == 0) return (QuantumScale * pixel.blue); break; } case 'C': case 'c': { if (IsFxFunction(symbol, "channel", 7) != MagickFalse) { GeometryInfo channel_info; MagickStatusType flags; flags = ParseGeometry(symbol + 7, &channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return (0.0); return (channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return (0.0); return (channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return (0.0); return (channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return (0.0); return (channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return (0.0); return (channel_info.chi); } default: return (0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return (0.0); return (channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return (0.0); return (channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return (0.0); return (channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return (0.0); return (channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return (0.0); return (channel_info.psi); } default: return (0.0); } } if (LocaleCompare(symbol, "c") == 0) return (QuantumScale * pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol, "depth", 5) == 0) return (FxChannelStatistics(fx_info, 
image, channel, symbol, exception)); break; } case 'E': case 'e': { if (LocaleCompare(symbol, "extent") == 0) { if (image->extent != 0) return ((double)image->extent); return ((double)GetBlobSize(image)); } break; } case 'G': case 'g': { if (LocaleCompare(symbol, "g") == 0) return (QuantumScale * pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol, "kurtosis", 8) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleCompare(symbol, "k") == 0) { if (image->colorspace != CMYKColorspace) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ColorSeparatedImageRequired", "`%s'", image->filename); return (0.0); } return (QuantumScale * pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol, "h") == 0) return ((double)image->rows); if (LocaleCompare(symbol, "hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red, pixel.green, pixel.blue, &hue, &saturation, &lightness); return (hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol, "image.depth") == 0) || (LocaleCompare(symbol, "image.minima") == 0) || (LocaleCompare(symbol, "image.maxima") == 0) || (LocaleCompare(symbol, "image.mean") == 0) || (LocaleCompare(symbol, "image.kurtosis") == 0) || (LocaleCompare(symbol, "image.skewness") == 0) || (LocaleCompare(symbol, "image.standard_deviation") == 0)) return (FxChannelStatistics(fx_info, image, channel, symbol + 6, exception)); if (LocaleCompare(symbol, "image.resolution.x") == 0) return (image->resolution.x); if (LocaleCompare(symbol, "image.resolution.y") == 0) return (image->resolution.y); if (LocaleCompare(symbol, "intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image, &pixel, quantum_pixel); return (QuantumScale * GetPixelIntensity(image, quantum_pixel)); } if (LocaleCompare(symbol, "i") == 0) return ((double)x); break; } case 'J': case 'j': { if (LocaleCompare(symbol, "j") == 0) return ((double)y); break; } case 'L': case 'l': { if (LocaleCompare(symbol, "lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red, pixel.green, pixel.blue, &hue, &saturation, &lightness); return (lightness); } if (LocaleCompare(symbol, "luma") == 0) { double luma; luma = 0.212656 * pixel.red + 0.715158 * pixel.green + 0.072186 * pixel.blue; return (QuantumScale * luma); } if (LocaleCompare(symbol, "luminance") == 0) { double luminance; luminance = 0.212656 * pixel.red + 0.715158 * pixel.green + 0.072186 * pixel.blue; return (QuantumScale * luminance); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol, "maxima", 6) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "mean", 4) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "median", 6) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "minima", 6) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleCompare(symbol, "m") == 0) return (QuantumScale * pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol, "n") == 0) return ((double)GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol, "o") == 0) return (QuantumScale * pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol, "page.height") == 0) return ((double)image->page.height); if (LocaleCompare(symbol, "page.width") == 0) return
((double)image->page.width); if (LocaleCompare(symbol, "page.x") == 0) return ((double)image->page.x); if (LocaleCompare(symbol, "page.y") == 0) return ((double)image->page.y); if (LocaleCompare(symbol, "printsize.x") == 0) return (PerceptibleReciprocal(image->resolution.x) * image->columns); if (LocaleCompare(symbol, "printsize.y") == 0) return (PerceptibleReciprocal(image->resolution.y) * image->rows); break; } case 'Q': case 'q': { if (LocaleCompare(symbol, "quality") == 0) return ((double)image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol, "resolution.x") == 0) return (image->resolution.x); if (LocaleCompare(symbol, "resolution.y") == 0) return (image->resolution.y); if (LocaleCompare(symbol, "r") == 0) return (QuantumScale * pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol, "saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red, pixel.green, pixel.blue, &hue, &saturation, &lightness); return (saturation); } if (LocaleNCompare(symbol, "skewness", 8) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); if (LocaleNCompare(symbol, "standard_deviation", 18) == 0) return (FxChannelStatistics(fx_info, image, channel, symbol, exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol, "t") == 0) return ((double)GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol, "w") == 0) return ((double)image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol, "y") == 0) return (QuantumScale * pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol, "z") == 0) return ((double)GetImageDepth(image, fx_info->exception)); break; } default: break; } value = GetFxSymbolValue(fx_info, symbol); if (value != (const double *)NULL) return (*value); (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UndefinedVariable", "`%s'", symbol); (void)SetFxSymbolValue(fx_info, symbol, 0.0); return (0.0); } static const char * FxOperatorPrecedence(const char *expression, ExceptionInfo * exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c = (-1); level = 0; subexpression = (const char *)NULL; target = NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence = UndefinedPrecedence; if ((isspace((int)((unsigned char)*expression)) != 0) || (c == (int)'@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression, "acosh", 5) != MagickFalse) { expression += 5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression, "asinh", 5) != MagickFalse) { expression += 5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression, "atanh", 5) != MagickFalse) { expression += 5; break; } #endif if (IsFxFunction(expression, "atan2", 5) != MagickFalse) { expression += 5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression, "E+", 2) == 0) || (LocaleNCompare(expression, "E-", 2) == 0))) { expression += 2; /* 
scientific notation */ break; } } case 'J': case 'j': { if ((IsFxFunction(expression, "j0", 2) != MagickFalse) || (IsFxFunction(expression, "j1", 2) != MagickFalse)) { expression += 2; break; } break; } case '#': { while (isxdigit((int)((unsigned char)*(expression + 1))) != 0) expression++; break; } default: break; } if ((c == (int)'{') || (c == (int)'[')) level++; else if ((c == (int)'}') || (c == (int)']')) level--; if (level == 0) switch ((unsigned char)*expression) { case '~': case '!': { precedence = BitwiseComplementPrecedence; break; } case '^': case '@': { precedence = ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")", c) != (char *)NULL))) && (((islower((int)((unsigned char)*expression)) != 0) || (strchr("(", (int)((unsigned char)*expression)) != (char *)NULL)) || ((isdigit(c) == 0) && (isdigit((int)((unsigned char)*expression)) != 0))) && (strchr("xy", (int)((unsigned char)*expression)) == (char *)NULL)) precedence = MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence = MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,", c) == (char *)NULL) || (isalpha(c) != 0)) precedence = AdditionPrecedence; break; } case BitwiseAndAssignmentOperator: case BitwiseOrAssignmentOperator: case LeftShiftAssignmentOperator: case RightShiftAssignmentOperator: case PowerAssignmentOperator: case ModuloAssignmentOperator: case PlusAssignmentOperator: case SubtractAssignmentOperator: case MultiplyAssignmentOperator: case DivideAssignmentOperator: case IncrementAssignmentOperator: case DecrementAssignmentOperator: { precedence = AssignmentPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence = ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence = RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence = EquivalencyPrecedence; break; } case '&': { precedence = BitwiseAndPrecedence; break; } case '|': { precedence = BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence = LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence = LogicalOrPrecedence; break; } case ExponentialNotation: { precedence = ExponentialNotationPrecedence; break; } case ':': case '?': { precedence = TernaryPrecedence; break; } case '=': { precedence = AssignmentPrecedence; break; } case ',': { precedence = CommaPrecedence; break; } case ';': { precedence = SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* * Right-to-left associativity. */ target = precedence; subexpression = expression; } } else if (precedence >= target) { /* * Left-to-right associativity. 
*/ target = precedence; subexpression = expression; } if (strchr("(", (int)*expression) != (char *)NULL) expression = FxSubexpression(expression, exception); c = (int)(*expression++); } return (subexpression); } static double FxEvaluateSubexpression(FxInfo * fx_info, const PixelChannel channel, const ssize_t x, const ssize_t y, const char *expression, const size_t depth, double *beta, ExceptionInfo * exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 200 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } #define FxParseConditional(subexpression,sentinel,p,q) \ { \ p=subexpression; \ for (q=(char *) p; (*q != (sentinel)) && (*q != '\0'); q++) \ if (*q == '(') \ { \ for (q++; (*q != ')') && (*q != '\0'); q++); \ if (*q == '\0') \ break; \ } \ if (*q == '\0') \ { \ (void) ThrowMagickException(exception,GetMagickModule(), \ OptionError,"UnableToParseExpression","`%s'",subexpression); \ FxReturn(0.0); \ } \ if (strlen(q) == 1) \ *(q+1)='\0'; \ *q='\0'; \ } char *q, *subexpression; double alpha, gamma, sans, value; register const char *p; *beta = 0.0; sans = 0.0; subexpression = AcquireString(expression); *subexpression = '\0'; if (depth > FxMaxSubexpressionDepth) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int)((unsigned char)*expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p = FxOperatorPrecedence(expression, exception); if (p != (const char *)NULL) { (void)CopyMagickString(subexpression, expression, (size_t) (p - expression + 1)); alpha = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth + 1, beta, exception); switch ((unsigned char)*p) { case '~': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); *beta = (double)(~(size_t) * beta); FxReturn(*beta); } case '!': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(*beta == 0.0 ?
1.0 : 0.0); } case '^': { *beta = pow(alpha, FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha * (*beta)); } case '/': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(PerceptibleReciprocal(*beta) * alpha); } case '%': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(fmod(alpha, *beta)); } case '+': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha + (*beta)); } case '-': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha - (*beta)); } case BitwiseAndAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = (double)((size_t) (alpha + 0.5) & (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case BitwiseOrAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = (double)((size_t) (alpha + 0.5) | (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case LeftShiftAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (*beta + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } value = (double)((size_t) (alpha + 0.5) << (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case RightShiftAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (*beta + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } value = (double)((size_t) (alpha + 0.5) >> (size_t) (*beta + 0.5)); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case PowerAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if 
(*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = pow(alpha, *beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case ModuloAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = fmod(alpha, *beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case PlusAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha + (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case SubtractAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha - (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case MultiplyAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha * (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case DivideAssignmentOperator: { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha * PerceptibleReciprocal(*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case IncrementAssignmentOperator: { if (*subexpression == '\0') alpha = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha + 1.0; if (*subexpression == '\0') { if (SetFxSymbolValue(fx_info, p, value) == MagickFalse) return (0.0); } else if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case DecrementAssignmentOperator: { if (*subexpression == '\0') alpha = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = alpha - 1.0; if (*subexpression == 
'\0') { if (SetFxSymbolValue(fx_info, p, value) == MagickFalse) return (0.0); } else if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case LeftShiftOperator: { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (gamma + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } *beta = (double)((size_t) (alpha + 0.5) << (size_t) (gamma + 0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); if ((size_t) (gamma + 0.5) >= (8 * sizeof(size_t))) { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ShiftCountOverflow", "`%s'", subexpression); FxReturn(0.0); } *beta = (double)((size_t) (alpha + 0.5) >> (size_t) (gamma + 0.5)); FxReturn(*beta); } case '<': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(fabs(alpha - (*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(fabs(alpha - (*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); *beta = (double)((size_t) (alpha + 0.5) & (size_t) (gamma + 0.5)); FxReturn(*beta); } case '|': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); *beta = (double)((size_t) (alpha + 0.5) | (size_t) (gamma + 0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta = 0.0; FxReturn(*beta); } gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception); *beta = (gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta = 1.0; FxReturn(*beta); } gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception); *beta = (gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void)CopyMagickString(subexpression, ++p, MagickPathExtent - 1); FxParseConditional(subexpression, ':', p, q); if (fabs(alpha) >= MagickEpsilon) gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception); else gamma = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception); FxReturn(gamma); } case '=': { q = subexpression; while (isalpha((int)((unsigned char)*q)) != 0) q++; if (*q != '\0') { (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "UnableToParseExpression", "`%s'", subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); value = (*beta); if (SetFxSymbolValue(fx_info, subexpression, value) == MagickFalse) return (0.0); FxReturn(*beta); } case ',': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(alpha); } case ';': { *beta = FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(*beta); } default: { gamma = alpha * FxEvaluateSubexpression(fx_info, channel, x, y, ++p, depth + 1, beta, exception); FxReturn(gamma); } } } if (strchr("(", (int)*expression) != (char *)NULL) { size_t length; if (depth >= FxMaxParenthesisDepth) (void)ThrowMagickException(exception, GetMagickModule(), OptionError, "ParenthesisNestedTooDeeply", "`%s'", expression); length = CopyMagickString(subexpression, expression + 1, MagickPathExtent); if (length != 0) subexpression[length - 1] = '\0'; gamma = FxEvaluateSubexpression(fx_info, channel, x, y, subexpression, depth + 1, beta, exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 1, depth + 1, beta, exception); FxReturn(1.0 * gamma); } case '-': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 1, depth + 1, beta, exception); FxReturn(-1.0 * gamma); } case '~': { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 1, depth + 1, beta, exception); FxReturn((double)(~(size_t) (gamma + 0.5))); } case 'A': case 'a': { if (IsFxFunction(expression, "abs", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (IsFxFunction(expression, "acosh", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(acosh(alpha)); } #endif if (IsFxFunction(expression, "acos", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression, "airy", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); if (alpha == 0.0) FxReturn(1.0); gamma = 2.0 * j1((MagickPI * alpha)) / (MagickPI * alpha); FxReturn(gamma * gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (IsFxFunction(expression, "asinh", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(asinh(alpha)); } #endif if (IsFxFunction(expression, "asin", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(asin(alpha)); } if (IsFxFunction(expression, 
"alt", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (IsFxFunction(expression, "atan2", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(atan2(alpha, *beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (IsFxFunction(expression, "atanh", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(atanh(alpha)); } #endif if (IsFxFunction(expression, "atan", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression, "a") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression, "b") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'C': case 'c': { if (IsFxFunction(expression, "ceil", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(ceil(alpha)); } if (IsFxFunction(expression, "clamp", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (IsFxFunction(expression, "cosh", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(cosh(alpha)); } if (IsFxFunction(expression, "cos", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression, "c") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'D': case 'd': { if (IsFxFunction(expression, "debug", 5) != MagickFalse) { const char *type; size_t length; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); switch (fx_info->images->colorspace) { case CMYKColorspace: { switch (channel) { case CyanPixelChannel: type = "cyan"; break; case MagentaPixelChannel: type = "magenta"; break; case YellowPixelChannel: type = "yellow"; break; case AlphaPixelChannel: type = "alpha"; break; case BlackPixelChannel: type = "black"; break; default: type = "unknown"; break; } break; } case GRAYColorspace: { switch (channel) { case RedPixelChannel: type = "gray"; break; case AlphaPixelChannel: type = "alpha"; break; default: type = "unknown"; break; } break; } default: { switch (channel) { case RedPixelChannel: type = "red"; break; case GreenPixelChannel: type = "green"; break; case BluePixelChannel: type = "blue"; break; case AlphaPixelChannel: type = "alpha"; break; default: type = "unknown"; break; } break; } } *subexpression = '\0'; length = 1; if (strlen(expression) > 6) length = CopyMagickString(subexpression, expression + 6, MagickPathExtent); if (length != 0) subexpression[length - 1] = '\0'; if (fx_info->file != (FILE *) NULL) (void)FormatLocaleFile(fx_info->file, "%s[%.20g,%.20g].%s: " "%s=%.*g\n", fx_info->images->filename, (double)x, (double)y, type, subexpression, GetMagickPrecision(), alpha); FxReturn(alpha); } if (IsFxFunction(expression, "do", 2) != MagickFalse) { size_t length; /* * Parse 
do(expression,condition test). */ length = CopyMagickString(subexpression, expression + 3, MagickPathExtent - 1); if (length != 0) subexpression[length - 1] = '\0'; FxParseConditional(subexpression, ',', p, q); for (alpha = 0.0;;) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception); gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception); if (fabs(gamma) < MagickEpsilon) break; } FxReturn(alpha); } if (IsFxFunction(expression, "drc", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn((alpha / (*beta * (alpha - 1.0) + 1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression, "epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (IsFxFunction(expression, "erf", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(erf(alpha)); } #endif if (IsFxFunction(expression, "exp", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression, "e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (IsFxFunction(expression, "floor", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression, "for", 3) != MagickFalse) { double sans = 0.0; size_t length; /* * Parse for(initialization, condition test, expression). */ length = CopyMagickString(subexpression, expression + 4, MagickPathExtent - 1); if (length != 0) subexpression[length - 1] = '\0'; FxParseConditional(subexpression, ',', p, q); alpha = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception); (void)CopyMagickString(subexpression, q + 1, MagickPathExtent - 1); FxParseConditional(subexpression, ',', p, q); for (alpha = 0.0;;) { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception); } FxReturn(alpha); } break; } case 'G': case 'g': { if (IsFxFunction(expression, "gauss", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(exp((-alpha * alpha / 2.0)) / sqrt(2.0 * MagickPI)); } if (IsFxFunction(expression, "gcd", 3) != MagickFalse) { MagickOffsetType gcd; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); gcd = FxGCD((MagickOffsetType) (alpha + 0.5), (MagickOffsetType) (*beta + 0.5)); FxReturn((double)gcd); } if (LocaleCompare(expression, "g") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression, "h") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); if (LocaleCompare(expression, "hue") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); if (IsFxFunction(expression, "hypot", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn(hypot(alpha, *beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression, "k") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, 
exception)); break; } case 'I': case 'i': { if (IsFxFunction(expression, "if", 2) != MagickFalse) { double sans = 0.0; size_t length; length = CopyMagickString(subexpression, expression + 3, MagickPathExtent - 1); if (length != 0) subexpression[length - 1] = '\0'; FxParseConditional(subexpression, ',', p, q); alpha = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception); (void)CopyMagickString(subexpression, q + 1, MagickPathExtent - 1); FxParseConditional(subexpression, ',', p, q); if (fabs(alpha) >= MagickEpsilon) alpha = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, beta, exception); else alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception); FxReturn(alpha); } if (LocaleCompare(expression, "intensity") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); if (IsFxFunction(expression, "int", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(floor(alpha)); } if (IsFxFunction(expression, "isnan", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); FxReturn((double)!!IsNaN(alpha)); } if (LocaleCompare(expression, "i") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression, "j") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); #if defined(MAGICKCORE_HAVE_J0) if (IsFxFunction(expression, "j0", 2) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 2, depth + 1, beta, exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression, "j1", 2) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 2, depth + 1, beta, exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (IsFxFunction(expression, "jinc", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); if (alpha == 0.0) FxReturn(1.0); FxReturn((2.0 * j1((MagickPI * alpha)) / (MagickPI * alpha))); } #endif break; } case 'L': case 'l': { if (IsFxFunction(expression, "ln", 2) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 2, depth + 1, beta, exception); FxReturn(log(alpha)); } if (IsFxFunction(expression, "logtwo", 6) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 6, depth + 1, beta, exception); FxReturn(log10(alpha) / log10(2.0)); } if (IsFxFunction(expression, "log", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression, "lightness") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'M': case 'm': { if (LocaleCompare(expression, "MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression, "maxima", 6) == 0) break; if (IsFxFunction(expression, "max", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(alpha > *beta ? 
alpha : *beta); } if (LocaleNCompare(expression, "minima", 6) == 0) break; if (IsFxFunction(expression, "min", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(alpha < *beta ? alpha : *beta); } if (IsFxFunction(expression, "mod", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(alpha - floor((alpha * PerceptibleReciprocal(*beta))) * (*beta)); } if (LocaleCompare(expression, "m") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'N': case 'n': { if (IsFxFunction(expression, "not", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn((double)(alpha < MagickEpsilon)); } if (LocaleCompare(expression, "n") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression, "Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression, "o") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression, "phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression, "pi") == 0) FxReturn(MagickPI); if (IsFxFunction(expression, "pow", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(pow(alpha, *beta)); } if (LocaleCompare(expression, "p") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression, "QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression, "QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (IsFxFunction(expression, "rand", 4) != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha = GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (IsFxFunction(expression, "round", 5) != MagickFalse) { /* * Round the fraction to nearest integer. */ alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); if ((alpha - floor(alpha)) < (ceil(alpha) - alpha)) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression, "r") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'S': case 's': { if (LocaleCompare(expression, "saturation") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); if (IsFxFunction(expression, "sign", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (IsFxFunction(expression, "sinc", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); if (alpha == 0) FxReturn(1.0); FxReturn(sin((MagickPI * alpha)) / (MagickPI * alpha)); } if (IsFxFunction(expression, "sinh", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(sinh(alpha)); } if (IsFxFunction(expression, "sin", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(sin(alpha)); } if (IsFxFunction(expression, "sqrt", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(sqrt(alpha)); } if (IsFxFunction(expression, "squish", 6) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 6, depth + 1, beta, exception); FxReturn((1.0 / (1.0 + exp(-alpha)))); } if (LocaleCompare(expression, "s") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'T': case 't': { if (IsFxFunction(expression, "tanh", 4) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 4, depth + 1, beta, exception); FxReturn(tanh(alpha)); } if (IsFxFunction(expression, "tan", 3) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 3, depth + 1, beta, exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression, "Transparent") == 0) FxReturn(0.0); if (IsFxFunction(expression, "trunc", 5) != MagickFalse) { alpha = FxEvaluateSubexpression(fx_info, channel, x, y, expression + 5, depth + 1, beta, exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression, "t") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression, "u") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression, "v") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'W': case 'w': { if (IsFxFunction(expression, "while", 5) != MagickFalse) { size_t length; /* * Parse while(condition test, expression). 
*/ length = CopyMagickString(subexpression, expression + 6, MagickPathExtent - 1); if (length != 0) subexpression[length - 1] = '\0'; FxParseConditional(subexpression, ',', p, q); for (alpha = 0.0;;) { gamma = FxEvaluateSubexpression(fx_info, channel, x, y, p, depth + 1, &sans, exception); if (fabs(gamma) < MagickEpsilon) break; alpha = FxEvaluateSubexpression(fx_info, channel, x, y, q + 1, depth + 1, beta, exception); } FxReturn(alpha); } if (LocaleCompare(expression, "w") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression, "y") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression, "z") == 0) FxReturn(FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception)); break; } default: break; } subexpression = DestroyString(subexpression); q = (char *)expression; alpha = InterpretSiPrefixValue(expression, &q); if (q == expression) alpha = FxGetSymbol(fx_info, channel, x, y, expression, depth + 1, exception); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo * fx_info, double *alpha, ExceptionInfo * exception) { MagickBooleanType status; status = FxEvaluateChannelExpression(fx_info, GrayPixelChannel, 0, 0, alpha, exception); return (status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo * fx_info, double *alpha, ExceptionInfo * exception) { FILE * file; MagickBooleanType status; file = fx_info->file; fx_info->file = (FILE *) NULL; status = FxEvaluateChannelExpression(fx_info, GrayPixelChannel, 0, 0, alpha, exception); fx_info->file = file; return (status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo * fx_info, const PixelChannel channel, const ssize_t x, const ssize_t y, double *alpha, ExceptionInfo * exception) { double beta; beta = 0.0; *alpha = FxEvaluateSubexpression(fx_info, channel, x, y, fx_info->expression, 0, &beta, exception); return (exception->severity == OptionError ? MagickFalse : MagickTrue); } /* F x I m a g e: FxImage() applies a mathematical expression to the specified image. The format of the FxImage method is: Image *FxImage(const Image *image, const char *expression, ExceptionInfo *exception) A description of each parameter follows: o image: the image. o expression: a mathematical expression. o exception: return any errors or warnings in this structure.
*/ static FxInfo ** DestroyFxThreadSet(FxInfo ** fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i = 0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i] = DestroyFxInfo(fx_info[i]); fx_info = (FxInfo **) RelinquishMagickMemory(fx_info); return (fx_info); } static FxInfo ** AcquireFxThreadSet(const Image * image, const char *expression, ExceptionInfo * exception) { char *fx_expression; double alpha; FxInfo ** fx_info; register ssize_t i; size_t number_threads; number_threads = (size_t) GetMagickResourceLimit(ThreadResource); fx_info = (FxInfo **) AcquireQuantumMemory(number_threads, sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); return ((FxInfo **) NULL); } (void)memset(fx_info, 0, number_threads * sizeof(*fx_info)); if (*expression != '@') fx_expression = ConstantString(expression); else fx_expression = FileToString(expression + 1, ~0UL, exception); for (i = 0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i] = AcquireFxInfo(image, fx_expression, exception); if (fx_info[i] == (FxInfo *) NULL) break; status = FxPreprocessExpression(fx_info[i], &alpha, exception); if (status == MagickFalse) break; } fx_expression = DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info = DestroyFxThreadSet(fx_info); return (fx_info); } MagickExport Image * FxImage(const Image * image, const char *expression, ExceptionInfo * exception) { #define FxImageTag "Fx/Image" CacheView * fx_view, *image_view; FxInfo ** magick_restrict fx_info; Image * fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (expression == (const char *)NULL) return (CloneImage(image, 0, 0, MagickTrue, exception)); fx_info = AcquireFxThreadSet(image, expression, exception); if (fx_info == (FxInfo **) NULL) return ((Image *) NULL); fx_image = CloneImage(image, 0, 0, MagickTrue, exception); if (fx_image == (Image *) NULL) { fx_info = DestroyFxThreadSet(fx_info); return ((Image *) NULL); } if (SetImageStorageClass(fx_image, DirectClass, exception) == MagickFalse) { fx_info = DestroyFxThreadSet(fx_info); fx_image = DestroyImage(fx_image); return ((Image *) NULL); } /* Fx image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); fx_view = AcquireAuthenticCacheView(fx_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y = 0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); q = QueueCacheViewAuthenticPixels(fx_view, 0, y, fx_image->columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait fx_traits = GetPixelChannelTraits(fx_image, channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if ((fx_traits & CopyPixelTrait) != 0) { SetPixelChannel(fx_image, channel, p[i], q); continue; } alpha = 0.0; (void)FxEvaluateChannelExpression(fx_info[id], channel, x, y, &alpha, exception); q[i] = ClampToQuantum(QuantumRange * alpha); } p += GetPixelChannels(image); q += GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, FxImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } fx_view = DestroyCacheView(fx_view); image_view = DestroyCacheView(image_view); fx_info = DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image = DestroyImage(fx_image); return (fx_image); }
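/*
 * Illustrative sketch, not part of fx.c: how a caller might drive the
 * exported FxImage() entry point above.  The filename "input.png", the
 * expression "0.5*u", and the helper name example_fx_halve are assumptions
 * chosen for the example; AcquireImageInfo, ReadImage, FxImage, DestroyImage,
 * and DestroyImageInfo are standard MagickCore public API.  In a full program
 * these calls would be bracketed by MagickCoreGenesis()/MagickCoreTerminus().
 */
#include "MagickCore/MagickCore.h"

static Image *example_fx_halve(ExceptionInfo *exception)
{
  Image *fx_image, *image;
  ImageInfo *image_info;

  image_info = AcquireImageInfo();
  (void) CopyMagickString(image_info->filename, "input.png", MagickPathExtent);
  image = ReadImage(image_info, exception);            /* load source pixels */
  image_info = DestroyImageInfo(image_info);
  if (image == (Image *) NULL)
    return ((Image *) NULL);
  /* "u" names the first input image, so every channel is scaled by 0.5;
     the expression is re-evaluated for each pixel and channel */
  fx_image = FxImage(image, "0.5*u", exception);
  image = DestroyImage(image);
  return (fx_image);
}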
GB_binop__rdiv_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int16) // A*D function (colscale): GB (_AxD__rdiv_int16) // D*A function (rowscale): GB (_DxB__rdiv_int16) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int16) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int16) // C=scalar+B GB (_bind1st__rdiv_int16) // C=scalar+B' GB (_bind1st_tran__rdiv_int16) // C=A+scalar GB (_bind2nd__rdiv_int16) // C=A'+scalar GB (_bind2nd_tran__rdiv_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT16 || GxB_NO_RDIV_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
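/*
 * Orientation sketch, not generated code: what the RDIV_INT16 operator
 * computes.  GB_BINOP(z,x,y,i,j) above expands to
 * z = GB_IDIV_SIGNED (y, x, 16), i.e. "reverse division" z = y/x.
 * GB_IDIV_SIGNED itself is defined in GB.h; the helpers below
 * (example_idiv_int16, example_rdiv_int16) are an assumed plain-C analogue
 * of GraphBLAS-style safe signed division -- no divide-by-zero trap, 0/0
 * defined as 0, n/0 saturating to the type limits -- not the library macro.
 */
#include <stdint.h>

static inline int16_t example_idiv_int16 (int16_t x, int16_t y)   /* x/y */
{
    if (y == 0)
        /* mimic +/- infinity for n/0, and define 0/0 as 0 (assumption) */
        return ((x == 0) ? 0 : ((x < 0) ? INT16_MIN : INT16_MAX)) ;
    if (y == -1)
        /* negate in 32 bits to avoid the INT16_MIN / -1 overflow trap */
        return ((int16_t) (0 - (int32_t) x)) ;
    return ((int16_t) (x / y)) ;
}

/* rdiv is div with the operands flipped: z = y/x */
static inline int16_t example_rdiv_int16 (int16_t x, int16_t y)
{
    return (example_idiv_int16 (y, x)) ;
}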
void GB (_Cdense_ewise3_accum__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 16) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 16) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
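Every scalar operation in the kernels above funnels through GB_IDIV_SIGNED (bij, aij, 16); note the flipped arguments, since rdiv computes bij/aij rather than aij/bij. GB_IDIV_SIGNED itself is defined elsewhere in GraphBLAS; the sketch below is a hypothetical stand-in showing the guards such a signed 16-bit integer division conventionally needs (division by zero, and the INT16_MIN / -1 case), not the library's actual definition.

#include <stdint.h>

/* Hypothetical stand-in for GB_IDIV_SIGNED (x, y, 16): guarded signed
   16-bit integer division.  Assumed conventions: x/0 saturates to
   INT16_MIN or INT16_MAX (with 0/0 giving 0), and x/(-1) is computed
   as -x so that INT16_MIN / -1 never raises a hardware exception. */
static int16_t idiv_int16_sketch (int16_t x, int16_t y)
{
    if (y == -1)
    {
        /* sidestep the one trapping case, INT16_MIN / -1 */
        return ((int16_t) (-x)) ;
    }
    else if (y == 0)
    {
        /* integer "NaN" and saturated +/- "infinity" */
        return ((x == 0) ? 0 : ((x < 0) ? INT16_MIN : INT16_MAX)) ;
    }
    else
    {
        return ((int16_t) (x / y)) ;
    }
}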
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int16) // A*D function (colscale): GB (_AxD__rdiv_int16) // D*A function (rowscale): GB (_DxB__rdiv_int16) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int16) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int16) // C=scalar+B GB (_bind1st__rdiv_int16) // C=scalar+B' GB (_bind1st_tran__rdiv_int16) // C=A+scalar GB (_bind2nd__rdiv_int16) // C=A'+scalar GB (_bind2nd_tran__rdiv_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT16 || GxB_NO_RDIV_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 16) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to 
binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 16) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
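A point that is easy to misread in the two bind kernels above: for rdiv, bind1st binds the scalar as x and so computes Cx[p] = bij / x, while bind2nd binds it as y and computes Cx[p] = y / aij. A minimal standalone illustration (plain C division with a nonzero scalar, no GB_IDIV_SIGNED guards, hypothetical names):

#include <stdio.h>
#include <stdint.h>

int main (void)
{
    int16_t v [3] = { 10, 20, 40 } ;
    int16_t s = 2, c1 [3], c2 [3] ;
    for (int p = 0 ; p < 3 ; p++)
    {
        c1 [p] = v [p] / s ;    /* bind1st rdiv: scalar is x, so bij / x */
        c2 [p] = s / v [p] ;    /* bind2nd rdiv: scalar is y, so y / aij */
    }
    printf ("%d %d %d\n", c1 [0], c1 [1], c1 [2]) ;   /* prints 5 10 20 */
    printf ("%d %d %d\n", c2 [0], c2 [1], c2 [2]) ;   /* prints 0 0 0 */
    return (0) ;
}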
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int16) // A*D function (colscale): GB (_AxD__rdiv_int16) // D*A function (rowscale): GB (_DxB__rdiv_int16) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int16) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int16) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int16) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int16) // C=scalar+B GB (_bind1st__rdiv_int16) // C=scalar+B' GB (_bind1st_tran__rdiv_int16) // C=A+scalar GB (_bind2nd__rdiv_int16) // C=A'+scalar GB (_bind2nd_tran__rdiv_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_SIGNED (y, x, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_INT16 || GxB_NO_RDIV_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (bij, x, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (y, aij, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, x, 16) ; \ } GrB_Info GB (_bind1st_tran__rdiv_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (y, aij, 16) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
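The only difference between this copy and the serial one above is the #pragma omp parallel for num_threads(nthreads) schedule(static) on the bind1st/bind2nd loops. Each iteration writes only Cx[p], so the loop is embarrassingly parallel and a static schedule with the caller-chosen thread count is the natural choice. A minimal standalone version of the same pattern, with hypothetical names and an unguarded division (x assumed nonzero):

#include <stdint.h>
#include <stddef.h>

/* Apply Cx [p] = Bx [p] / x over bnz entries, skipping entries whose
   bitmap byte is zero -- the same shape as the _bind1st loop above.
   Compile with OpenMP (e.g. -fopenmp); without it the pragma is
   ignored and the loop runs serially. */
void apply_rdiv_bind1st (int16_t *Cx, const int16_t *Bx,
    const int8_t *Bb, int64_t bnz, int16_t x, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* entry not present */
        Cx [p] = (int16_t) (Bx [p] / x) ;       /* unguarded sketch */
    }
}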
contract.c
/* * Copyright (c) 2014-2017 Ilya Kaliman * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <stdlib.h> #include <string.h> #ifdef XM_USE_MPI #include <mpi.h> #endif #include "xm.h" #include "util.h" struct blockpair { xm_dim_t blkidxa, blkidxb; xm_scalar_t alpha; }; void sgemm_(char *, char *, long int *, long int *, long int *, float *, float *, long int *, float *, long int *, float *, float *, long int *); void cgemm_(char *, char *, long int *, long int *, long int *, float complex *, float complex *, long int *, float complex *, long int *, float complex *, float complex *, long int *); void dgemm_(char *, char *, long int *, long int *, long int *, double *, double *, long int *, double *, long int *, double *, double *, long int *); void zgemm_(char *, char *, long int *, long int *, long int *, double complex *, double complex *, long int *, double complex *, long int *, double complex *, double complex *, long int *); static void xgemm(char transa, char transb, long int m, long int n, long int k, xm_scalar_t alpha, void *a, long int lda, void *b, long int ldb, xm_scalar_t beta, void *c, long int ldc, int type) { switch (type) { case XM_SCALAR_FLOAT: { float al = alpha, bt = beta; sgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_FLOAT_COMPLEX: { float complex al = alpha, bt = beta; cgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_DOUBLE: { double al = alpha, bt = beta; dgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_DOUBLE_COMPLEX: { double complex al = alpha, bt = beta; zgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } } } static void compute_block(xm_scalar_t alpha, const xm_tensor_t *a, const xm_tensor_t *b, xm_scalar_t beta, xm_tensor_t *c, xm_dim_t cidxa, xm_dim_t aidxa, xm_dim_t cidxb, xm_dim_t aidxb, xm_dim_t cidxc, xm_dim_t aidxc, xm_dim_t blkidxc, struct blockpair *pairs, void *buf) { size_t maxblockbytesa = xm_tensor_get_largest_block_bytes(a); size_t maxblockbytesb = xm_tensor_get_largest_block_bytes(b); size_t maxblockbytesc = xm_tensor_get_largest_block_bytes(c); xm_dim_t dims, blkidxa, blkidxb, nblocksa, nblocksb; void *bufa1, *bufa2, *bufb1, *bufb2, *bufc1, *bufc2; size_t i, j, m, n, k, nblkk, blksize; int type; bufa1 = buf; bufa2 = (char *)bufa1 + maxblockbytesa; bufb1 = (char *)bufa2 + maxblockbytesa; bufb2 = (char *)bufb1 + maxblockbytesb; bufc1 = (char *)bufb2 + maxblockbytesb; bufc2 = (char *)bufc1 + maxblockbytesc; type = xm_tensor_get_scalar_type(c); nblocksa = xm_tensor_get_nblocks(a); nblocksb = xm_tensor_get_nblocks(b); nblkk = xm_dim_dot_mask(&nblocksa, &cidxa); dims = xm_tensor_get_block_dims(c, blkidxc); m = xm_dim_dot_mask(&dims, &cidxc); n = xm_dim_dot_mask(&dims, &aidxc); xm_tensor_read_block(c, 
blkidxc, bufc2); if (aidxc.n > 0 && aidxc.i[0] == 0) xm_tensor_unfold_block(c, blkidxc, aidxc, cidxc, bufc2, bufc1, n); else xm_tensor_unfold_block(c, blkidxc, cidxc, aidxc, bufc2, bufc1, m); blksize = xm_tensor_get_block_size(c, blkidxc); xm_scalar_mul(bufc1, blksize, type, beta); if (alpha == 0) goto done; blkidxa = xm_dim_zero(nblocksa.n); blkidxb = xm_dim_zero(nblocksb.n); xm_dim_set_mask(&blkidxa, &aidxa, &blkidxc, &cidxc); xm_dim_set_mask(&blkidxb, &aidxb, &blkidxc, &aidxc); for (i = 0; i < nblkk; i++) { int blktypea = xm_tensor_get_block_type(a, blkidxa); int blktypeb = xm_tensor_get_block_type(b, blkidxb); pairs[i].alpha = 0; pairs[i].blkidxa = blkidxa; pairs[i].blkidxb = blkidxb; if (blktypea != XM_BLOCK_TYPE_ZERO && blktypeb != XM_BLOCK_TYPE_ZERO) { xm_scalar_t sa = xm_tensor_get_block_scalar(a, blkidxa); xm_scalar_t sb = xm_tensor_get_block_scalar(b, blkidxb); pairs[i].alpha = sa * sb; } xm_dim_inc_mask(&blkidxa, &nblocksa, &cidxa); xm_dim_inc_mask(&blkidxb, &nblocksb, &cidxb); } for (i = 0; i < nblkk; i++) { if (pairs[i].alpha == 0) continue; for (j = i+1; j < nblkk; j++) { xm_dim_t dia, dja, dib, djb, pia, pja, pib, pjb; size_t ii, good = 1; if (pairs[j].alpha == 0) continue; dia = pairs[i].blkidxa; dja = pairs[j].blkidxa; dib = pairs[i].blkidxb; djb = pairs[j].blkidxb; if (xm_tensor_get_block_data_ptr(a, dia) != xm_tensor_get_block_data_ptr(a, dja) || xm_tensor_get_block_data_ptr(b, dib) != xm_tensor_get_block_data_ptr(b, djb)) continue; pia = xm_tensor_get_block_permutation(a, dia); pja = xm_tensor_get_block_permutation(a, dja); pib = xm_tensor_get_block_permutation(b, dib); pjb = xm_tensor_get_block_permutation(b, djb); for (ii = 0; ii < aidxa.n && good; ii++) { if (pia.i[aidxa.i[ii]] != pja.i[aidxa.i[ii]]) good = 0; } for (ii = 0; ii < aidxb.n && good; ii++) { if (pib.i[aidxb.i[ii]] != pjb.i[aidxb.i[ii]]) good = 0; } if (good) { pairs[i].alpha += pairs[j].alpha; pairs[j].alpha = 0; } } } for (i = 0; i < nblkk; i++) { if (pairs[i].alpha != 0) { blkidxa = pairs[i].blkidxa; blkidxb = pairs[i].blkidxb; dims = xm_tensor_get_block_dims(a, blkidxa); k = xm_dim_dot_mask(&dims, &cidxa); xm_tensor_read_block(a, blkidxa, bufa1); xm_tensor_unfold_block(a, blkidxa, cidxa, aidxa, bufa1, bufa2, k); xm_tensor_read_block(b, blkidxb, bufb1); xm_tensor_unfold_block(b, blkidxb, cidxb, aidxb, bufb1, bufb2, k); if (aidxc.n > 0 && aidxc.i[0] == 0) { xgemm('T', 'N', (int)n, (int)m, (int)k, alpha*pairs[i].alpha, bufb2, (int)k, bufa2, (int)k, 1.0, bufc1, (int)n, type); } else { xgemm('T', 'N', (int)m, (int)n, (int)k, alpha*pairs[i].alpha, bufa2, (int)k, bufb2, (int)k, 1.0, bufc1, (int)m, type); } } } done: if (aidxc.n > 0 && aidxc.i[0] == 0) xm_tensor_fold_block(c, blkidxc, aidxc, cidxc, bufc1, bufc2, n); else xm_tensor_fold_block(c, blkidxc, cidxc, aidxc, bufc1, bufc2, m); xm_tensor_write_block(c, blkidxc, bufc2); } void xm_contract(xm_scalar_t alpha, const xm_tensor_t *a, const xm_tensor_t *b, xm_scalar_t beta, xm_tensor_t *c, const char *idxa, const char *idxb, const char *idxc) { const xm_block_space_t *bsa, *bsb, *bsc; xm_dim_t nblocksa, cidxa, aidxa, cidxb, aidxb, cidxc, aidxc, *blklist; size_t i, bufbytes, nblkk, nblklist; int mpirank = 0, mpisize = 1; if (xm_tensor_get_allocator(a) != xm_tensor_get_allocator(c) || xm_tensor_get_allocator(b) != xm_tensor_get_allocator(c)) fatal("tensors must use same allocator"); if (xm_tensor_get_scalar_type(a) != xm_tensor_get_scalar_type(c) || xm_tensor_get_scalar_type(b) != xm_tensor_get_scalar_type(c)) fatal("tensors must have same scalar type"); #ifdef 
XM_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpirank); MPI_Comm_size(MPI_COMM_WORLD, &mpisize); #endif bsa = xm_tensor_get_block_space(a); bsb = xm_tensor_get_block_space(b); bsc = xm_tensor_get_block_space(c); if (strlen(idxa) != xm_block_space_get_ndims(bsa)) fatal("bad contraction indices"); if (strlen(idxb) != xm_block_space_get_ndims(bsb)) fatal("bad contraction indices"); if (strlen(idxc) != xm_block_space_get_ndims(bsc)) fatal("bad contraction indices"); xm_make_masks(idxa, idxb, &cidxa, &cidxb); xm_make_masks(idxc, idxa, &cidxc, &aidxa); xm_make_masks(idxc, idxb, &aidxc, &aidxb); if (aidxa.n + cidxa.n != xm_block_space_get_ndims(bsa)) fatal("bad contraction indices"); if (aidxb.n + cidxb.n != xm_block_space_get_ndims(bsb)) fatal("bad contraction indices"); if (aidxc.n + cidxc.n != xm_block_space_get_ndims(bsc)) fatal("bad contraction indices"); if (!(aidxc.n > 0 && aidxc.i[0] == 0) && !(cidxc.n > 0 && cidxc.i[0] == 0)) fatal("bad contraction indices"); for (i = 0; i < cidxa.n; i++) if (!xm_block_space_eq1(bsa, cidxa.i[i], bsb, cidxb.i[i])) fatal("inconsistent a and b tensor block-spaces"); for (i = 0; i < cidxc.n; i++) if (!xm_block_space_eq1(bsc, cidxc.i[i], bsa, aidxa.i[i])) fatal("inconsistent a and c tensor block-spaces"); for (i = 0; i < aidxc.n; i++) if (!xm_block_space_eq1(bsc, aidxc.i[i], bsb, aidxb.i[i])) fatal("inconsistent b and c tensor block-spaces"); nblocksa = xm_tensor_get_nblocks(a); nblkk = xm_dim_dot_mask(&nblocksa, &cidxa); bufbytes = 2 * (xm_tensor_get_largest_block_bytes(a) + xm_tensor_get_largest_block_bytes(b) + xm_tensor_get_largest_block_bytes(c)); xm_tensor_get_canonical_block_list(c, &blklist, &nblklist); #ifdef _OPENMP #pragma omp parallel private(i) #endif { struct blockpair *pairs; void *buf; if ((pairs = malloc(nblkk * sizeof *pairs)) == NULL) fatal("out of memory"); if ((buf = malloc(bufbytes)) == NULL) fatal("out of memory"); #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = 0; i < nblklist; i++) { if ((int)i % mpisize == mpirank) compute_block(alpha, a, b, beta, c, cidxa, aidxa, cidxb, aidxb, cidxc, aidxc, blklist[i], pairs, buf); } free(buf); free(pairs); } free(blklist); #ifdef XM_USE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif }
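Both xgemm calls in compute_block use transa='T', transb='N' with the unfolded blocks stored k-major (lda = ldb = k), so each block pair contributes C += alpha * A^T * B in column-major BLAS terms. A tiny standalone check of that call convention, assuming linking against a Fortran BLAS that provides dgemm_:

#include <stdio.h>

void dgemm_(char *, char *, long int *, long int *, long int *,
    double *, double *, long int *, double *, long int *,
    double *, double *, long int *);

int main(void)
{
    /* column-major: a is k x m, b is k x n, c is m x n */
    long int m = 2, n = 2, k = 2;
    double a[4] = { 1, 2, 3, 4 };   /* columns (1,2) and (3,4) */
    double b[4] = { 1, 0, 0, 1 };   /* 2x2 identity */
    double c[4] = { 0, 0, 0, 0 };
    double alpha = 1.0, beta = 1.0; /* beta = 1 accumulates, as in compute_block */
    char ta = 'T', tb = 'N';
    dgemm_(&ta, &tb, &m, &n, &k, &alpha, a, &k, b, &k, &beta, c, &m);
    /* c = a^T * I, i.e. { 1, 3, 2, 4 } in column-major order */
    printf("%g %g %g %g\n", c[0], c[1], c[2], c[3]);
    return 0;
}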
#include <stdlib.h> #include <string.h> #ifdef XM_USE_MPI #include <mpi.h> #endif #include "xm.h" #include "util.h" struct blockpair { xm_dim_t blkidxa, blkidxb; xm_scalar_t alpha; }; void sgemm_(char *, char *, long int *, long int *, long int *, float *, float *, long int *, float *, long int *, float *, float *, long int *); void cgemm_(char *, char *, long int *, long int *, long int *, float complex *, float complex *, long int *, float complex *, long int *, float complex *, float complex *, long int *); void dgemm_(char *, char *, long int *, long int *, long int *, double *, double *, long int *, double *, long int *, double *, double *, long int *); void zgemm_(char *, char *, long int *, long int *, long int *, double complex *, double complex *, long int *, double complex *, long int *, double complex *, double complex *, long int *); static void xgemm(char transa, char transb, long int m, long int n, long int k, xm_scalar_t alpha, void *a, long int lda, void *b, long int ldb, xm_scalar_t beta, void *c, long int ldc, int type) { switch (type) { case XM_SCALAR_FLOAT: { float al = alpha, bt = beta; sgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_FLOAT_COMPLEX: { float complex al = alpha, bt = beta; cgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_DOUBLE: { double al = alpha, bt = beta; dgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_DOUBLE_COMPLEX: { double complex al = alpha, bt = beta; zgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } } } static void compute_block(xm_scalar_t alpha, const xm_tensor_t * a, const xm_tensor_t * b, xm_scalar_t beta, xm_tensor_t * c, xm_dim_t cidxa, xm_dim_t aidxa, xm_dim_t cidxb, xm_dim_t aidxb, xm_dim_t cidxc, xm_dim_t aidxc, xm_dim_t blkidxc, struct blockpair *pairs, void *buf) { size_t maxblockbytesa = xm_tensor_get_largest_block_bytes(a); size_t maxblockbytesb = xm_tensor_get_largest_block_bytes(b); size_t maxblockbytesc = xm_tensor_get_largest_block_bytes(c); xm_dim_t dims, blkidxa, blkidxb, nblocksa, nblocksb; void *bufa1, *bufa2, *bufb1, *bufb2, *bufc1, *bufc2; size_t i, j, m, n, k, nblkk, blksize; int type; bufa1 = buf; bufa2 = (char *)bufa1 + maxblockbytesa; bufb1 = (char *)bufa2 + maxblockbytesa; bufb2 = (char *)bufb1 + maxblockbytesb; bufc1 = (char *)bufb2 + maxblockbytesb; bufc2 = (char *)bufc1 + maxblockbytesc; type = xm_tensor_get_scalar_type(c); nblocksa = xm_tensor_get_nblocks(a); nblocksb = xm_tensor_get_nblocks(b); nblkk = xm_dim_dot_mask(&nblocksa, &cidxa); dims = xm_tensor_get_block_dims(c, blkidxc); m = xm_dim_dot_mask(&dims, &cidxc); n = xm_dim_dot_mask(&dims, &aidxc); xm_tensor_read_block(c, blkidxc, bufc2); if (aidxc.n > 0 && aidxc.i[0] == 0) xm_tensor_unfold_block(c, blkidxc, aidxc, cidxc, bufc2, bufc1, n); else xm_tensor_unfold_block(c, blkidxc, cidxc, aidxc, bufc2, bufc1, m); blksize = xm_tensor_get_block_size(c, blkidxc); xm_scalar_mul(bufc1, blksize, type, beta); if (alpha == 0) goto done; blkidxa = xm_dim_zero(nblocksa.n); blkidxb = xm_dim_zero(nblocksb.n); xm_dim_set_mask(&blkidxa, &aidxa, &blkidxc, &cidxc); xm_dim_set_mask(&blkidxb, &aidxb, &blkidxc, &aidxc); for (i = 0; i < nblkk; i++) { int blktypea = xm_tensor_get_block_type(a, blkidxa); int blktypeb = xm_tensor_get_block_type(b, blkidxb); pairs[i].alpha = 0; pairs[i].blkidxa = blkidxa; pairs[i].blkidxb = blkidxb; if (blktypea != XM_BLOCK_TYPE_ZERO && blktypeb != 
XM_BLOCK_TYPE_ZERO) { xm_scalar_t sa = xm_tensor_get_block_scalar(a, blkidxa); xm_scalar_t sb = xm_tensor_get_block_scalar(b, blkidxb); pairs[i].alpha = sa * sb; } xm_dim_inc_mask(&blkidxa, &nblocksa, &cidxa); xm_dim_inc_mask(&blkidxb, &nblocksb, &cidxb); } for (i = 0; i < nblkk; i++) { if (pairs[i].alpha == 0) continue; for (j = i + 1; j < nblkk; j++) { xm_dim_t dia, dja, dib, djb, pia, pja, pib, pjb; size_t ii, good = 1; if (pairs[j].alpha == 0) continue; dia = pairs[i].blkidxa; dja = pairs[j].blkidxa; dib = pairs[i].blkidxb; djb = pairs[j].blkidxb; if (xm_tensor_get_block_data_ptr(a, dia) != xm_tensor_get_block_data_ptr(a, dja) || xm_tensor_get_block_data_ptr(b, dib) != xm_tensor_get_block_data_ptr(b, djb)) continue; pia = xm_tensor_get_block_permutation(a, dia); pja = xm_tensor_get_block_permutation(a, dja); pib = xm_tensor_get_block_permutation(b, dib); pjb = xm_tensor_get_block_permutation(b, djb); for (ii = 0; ii < aidxa.n && good; ii++) { if (pia.i[aidxa.i[ii]] != pja.i[aidxa.i[ii]]) good = 0; } for (ii = 0; ii < aidxb.n && good; ii++) { if (pib.i[aidxb.i[ii]] != pjb.i[aidxb.i[ii]]) good = 0; } if (good) { pairs[i].alpha += pairs[j].alpha; pairs[j].alpha = 0; } } } for (i = 0; i < nblkk; i++) { if (pairs[i].alpha != 0) { blkidxa = pairs[i].blkidxa; blkidxb = pairs[i].blkidxb; dims = xm_tensor_get_block_dims(a, blkidxa); k = xm_dim_dot_mask(&dims, &cidxa); xm_tensor_read_block(a, blkidxa, bufa1); xm_tensor_unfold_block(a, blkidxa, cidxa, aidxa, bufa1, bufa2, k); xm_tensor_read_block(b, blkidxb, bufb1); xm_tensor_unfold_block(b, blkidxb, cidxb, aidxb, bufb1, bufb2, k); if (aidxc.n > 0 && aidxc.i[0] == 0) { xgemm('T', 'N', (int)n, (int)m, (int)k, alpha * pairs[i].alpha, bufb2, (int)k, bufa2, (int)k, 1.0, bufc1, (int)n, type); } else { xgemm('T', 'N', (int)m, (int)n, (int)k, alpha * pairs[i].alpha, bufa2, (int)k, bufb2, (int)k, 1.0, bufc1, (int)m, type); } } } done: if (aidxc.n > 0 && aidxc.i[0] == 0) xm_tensor_fold_block(c, blkidxc, aidxc, cidxc, bufc1, bufc2, n); else xm_tensor_fold_block(c, blkidxc, cidxc, aidxc, bufc1, bufc2, m); xm_tensor_write_block(c, blkidxc, bufc2); } void xm_contract(xm_scalar_t alpha, const xm_tensor_t * a, const xm_tensor_t * b, xm_scalar_t beta, xm_tensor_t * c, const char *idxa, const char *idxb, const char *idxc) { const xm_block_space_t *bsa, *bsb, *bsc; xm_dim_t nblocksa, cidxa, aidxa, cidxb, aidxb, cidxc, aidxc, *blklist; size_t i, bufbytes, nblkk, nblklist; int mpirank = 0, mpisize = 1; if (xm_tensor_get_allocator(a) != xm_tensor_get_allocator(c) || xm_tensor_get_allocator(b) != xm_tensor_get_allocator(c)) fatal("tensors must use same allocator"); if (xm_tensor_get_scalar_type(a) != xm_tensor_get_scalar_type(c) || xm_tensor_get_scalar_type(b) != xm_tensor_get_scalar_type(c)) fatal("tensors must have same scalar type"); #ifdef XM_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpirank); MPI_Comm_size(MPI_COMM_WORLD, &mpisize); #endif bsa = xm_tensor_get_block_space(a); bsb = xm_tensor_get_block_space(b); bsc = xm_tensor_get_block_space(c); if (strlen(idxa) != xm_block_space_get_ndims(bsa)) fatal("bad contraction indices"); if (strlen(idxb) != xm_block_space_get_ndims(bsb)) fatal("bad contraction indices"); if (strlen(idxc) != xm_block_space_get_ndims(bsc)) fatal("bad contraction indices"); xm_make_masks(idxa, idxb, &cidxa, &cidxb); xm_make_masks(idxc, idxa, &cidxc, &aidxa); xm_make_masks(idxc, idxb, &aidxc, &aidxb); if (aidxa.n + cidxa.n != xm_block_space_get_ndims(bsa)) fatal("bad contraction indices"); if (aidxb.n + cidxb.n != xm_block_space_get_ndims(bsb)) 
fatal("bad contraction indices"); if (aidxc.n + cidxc.n != xm_block_space_get_ndims(bsc)) fatal("bad contraction indices"); if (!(aidxc.n > 0 && aidxc.i[0] == 0) && !(cidxc.n > 0 && cidxc.i[0] == 0)) fatal("bad contraction indices"); for (i = 0; i < cidxa.n; i++) if (!xm_block_space_eq1(bsa, cidxa.i[i], bsb, cidxb.i[i])) fatal("inconsistent a and b tensor block-spaces"); for (i = 0; i < cidxc.n; i++) if (!xm_block_space_eq1(bsc, cidxc.i[i], bsa, aidxa.i[i])) fatal("inconsistent a and c tensor block-spaces"); for (i = 0; i < aidxc.n; i++) if (!xm_block_space_eq1(bsc, aidxc.i[i], bsb, aidxb.i[i])) fatal("inconsistent b and c tensor block-spaces"); nblocksa = xm_tensor_get_nblocks(a); nblkk = xm_dim_dot_mask(&nblocksa, &cidxa); bufbytes = 2 * (xm_tensor_get_largest_block_bytes(a) + xm_tensor_get_largest_block_bytes(b) + xm_tensor_get_largest_block_bytes(c)); xm_tensor_get_canonical_block_list(c, &blklist, &nblklist); { struct blockpair *pairs; void *buf; if ((pairs = malloc(nblkk * sizeof *pairs)) == NULL) fatal("out of memory"); if ((buf = malloc(bufbytes)) == NULL) fatal("out of memory"); for (i = 0; i < nblklist; i++) { if ((int)i % mpisize == mpirank) compute_block(alpha, a, b, beta, c, cidxa, aidxa, cidxb, aidxb, cidxc, aidxc, blklist[i], pairs, buf); } free(buf); free(pairs); } free(blklist); #ifdef XM_USE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif }
#include <stdlib.h> #include <string.h> #ifdef XM_USE_MPI #include <mpi.h> #endif #include "xm.h" #include "util.h" struct blockpair { xm_dim_t blkidxa, blkidxb; xm_scalar_t alpha; }; void sgemm_(char *, char *, long int *, long int *, long int *, float *, float *, long int *, float *, long int *, float *, float *, long int *); void cgemm_(char *, char *, long int *, long int *, long int *, float complex *, float complex *, long int *, float complex *, long int *, float complex *, float complex *, long int *); void dgemm_(char *, char *, long int *, long int *, long int *, double *, double *, long int *, double *, long int *, double *, double *, long int *); void zgemm_(char *, char *, long int *, long int *, long int *, double complex *, double complex *, long int *, double complex *, long int *, double complex *, double complex *, long int *); static void xgemm(char transa, char transb, long int m, long int n, long int k, xm_scalar_t alpha, void *a, long int lda, void *b, long int ldb, xm_scalar_t beta, void *c, long int ldc, int type) { switch (type) { case XM_SCALAR_FLOAT: { float al = alpha, bt = beta; sgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_FLOAT_COMPLEX: { float complex al = alpha, bt = beta; cgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_DOUBLE: { double al = alpha, bt = beta; dgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } case XM_SCALAR_DOUBLE_COMPLEX: { double complex al = alpha, bt = beta; zgemm_(&transa, &transb, &m, &n, &k, &al, a, &lda, b, &ldb, &bt, c, &ldc); return; } } } static void compute_block(xm_scalar_t alpha, const xm_tensor_t * a, const xm_tensor_t * b, xm_scalar_t beta, xm_tensor_t * c, xm_dim_t cidxa, xm_dim_t aidxa, xm_dim_t cidxb, xm_dim_t aidxb, xm_dim_t cidxc, xm_dim_t aidxc, xm_dim_t blkidxc, struct blockpair *pairs, void *buf) { size_t maxblockbytesa = xm_tensor_get_largest_block_bytes(a); size_t maxblockbytesb = xm_tensor_get_largest_block_bytes(b); size_t maxblockbytesc = xm_tensor_get_largest_block_bytes(c); xm_dim_t dims, blkidxa, blkidxb, nblocksa, nblocksb; void *bufa1, *bufa2, *bufb1, *bufb2, *bufc1, *bufc2; size_t i, j, m, n, k, nblkk, blksize; int type; bufa1 = buf; bufa2 = (char *)bufa1 + maxblockbytesa; bufb1 = (char *)bufa2 + maxblockbytesa; bufb2 = (char *)bufb1 + maxblockbytesb; bufc1 = (char *)bufb2 + maxblockbytesb; bufc2 = (char *)bufc1 + maxblockbytesc; type = xm_tensor_get_scalar_type(c); nblocksa = xm_tensor_get_nblocks(a); nblocksb = xm_tensor_get_nblocks(b); nblkk = xm_dim_dot_mask(&nblocksa, &cidxa); dims = xm_tensor_get_block_dims(c, blkidxc); m = xm_dim_dot_mask(&dims, &cidxc); n = xm_dim_dot_mask(&dims, &aidxc); xm_tensor_read_block(c, blkidxc, bufc2); if (aidxc.n > 0 && aidxc.i[0] == 0) xm_tensor_unfold_block(c, blkidxc, aidxc, cidxc, bufc2, bufc1, n); else xm_tensor_unfold_block(c, blkidxc, cidxc, aidxc, bufc2, bufc1, m); blksize = xm_tensor_get_block_size(c, blkidxc); xm_scalar_mul(bufc1, blksize, type, beta); if (alpha == 0) goto done; blkidxa = xm_dim_zero(nblocksa.n); blkidxb = xm_dim_zero(nblocksb.n); xm_dim_set_mask(&blkidxa, &aidxa, &blkidxc, &cidxc); xm_dim_set_mask(&blkidxb, &aidxb, &blkidxc, &aidxc); for (i = 0; i < nblkk; i++) { int blktypea = xm_tensor_get_block_type(a, blkidxa); int blktypeb = xm_tensor_get_block_type(b, blkidxb); pairs[i].alpha = 0; pairs[i].blkidxa = blkidxa; pairs[i].blkidxb = blkidxb; if (blktypea != XM_BLOCK_TYPE_ZERO && blktypeb != 
XM_BLOCK_TYPE_ZERO) { xm_scalar_t sa = xm_tensor_get_block_scalar(a, blkidxa); xm_scalar_t sb = xm_tensor_get_block_scalar(b, blkidxb); pairs[i].alpha = sa * sb; } xm_dim_inc_mask(&blkidxa, &nblocksa, &cidxa); xm_dim_inc_mask(&blkidxb, &nblocksb, &cidxb); } for (i = 0; i < nblkk; i++) { if (pairs[i].alpha == 0) continue; for (j = i + 1; j < nblkk; j++) { xm_dim_t dia, dja, dib, djb, pia, pja, pib, pjb; size_t ii, good = 1; if (pairs[j].alpha == 0) continue; dia = pairs[i].blkidxa; dja = pairs[j].blkidxa; dib = pairs[i].blkidxb; djb = pairs[j].blkidxb; if (xm_tensor_get_block_data_ptr(a, dia) != xm_tensor_get_block_data_ptr(a, dja) || xm_tensor_get_block_data_ptr(b, dib) != xm_tensor_get_block_data_ptr(b, djb)) continue; pia = xm_tensor_get_block_permutation(a, dia); pja = xm_tensor_get_block_permutation(a, dja); pib = xm_tensor_get_block_permutation(b, dib); pjb = xm_tensor_get_block_permutation(b, djb); for (ii = 0; ii < aidxa.n && good; ii++) { if (pia.i[aidxa.i[ii]] != pja.i[aidxa.i[ii]]) good = 0; } for (ii = 0; ii < aidxb.n && good; ii++) { if (pib.i[aidxb.i[ii]] != pjb.i[aidxb.i[ii]]) good = 0; } if (good) { pairs[i].alpha += pairs[j].alpha; pairs[j].alpha = 0; } } } for (i = 0; i < nblkk; i++) { if (pairs[i].alpha != 0) { blkidxa = pairs[i].blkidxa; blkidxb = pairs[i].blkidxb; dims = xm_tensor_get_block_dims(a, blkidxa); k = xm_dim_dot_mask(&dims, &cidxa); xm_tensor_read_block(a, blkidxa, bufa1); xm_tensor_unfold_block(a, blkidxa, cidxa, aidxa, bufa1, bufa2, k); xm_tensor_read_block(b, blkidxb, bufb1); xm_tensor_unfold_block(b, blkidxb, cidxb, aidxb, bufb1, bufb2, k); if (aidxc.n > 0 && aidxc.i[0] == 0) { xgemm('T', 'N', (int)n, (int)m, (int)k, alpha * pairs[i].alpha, bufb2, (int)k, bufa2, (int)k, 1.0, bufc1, (int)n, type); } else { xgemm('T', 'N', (int)m, (int)n, (int)k, alpha * pairs[i].alpha, bufa2, (int)k, bufb2, (int)k, 1.0, bufc1, (int)m, type); } } } done: if (aidxc.n > 0 && aidxc.i[0] == 0) xm_tensor_fold_block(c, blkidxc, aidxc, cidxc, bufc1, bufc2, n); else xm_tensor_fold_block(c, blkidxc, cidxc, aidxc, bufc1, bufc2, m); xm_tensor_write_block(c, blkidxc, bufc2); } void xm_contract(xm_scalar_t alpha, const xm_tensor_t * a, const xm_tensor_t * b, xm_scalar_t beta, xm_tensor_t * c, const char *idxa, const char *idxb, const char *idxc) { const xm_block_space_t *bsa, *bsb, *bsc; xm_dim_t nblocksa, cidxa, aidxa, cidxb, aidxb, cidxc, aidxc, *blklist; size_t i, bufbytes, nblkk, nblklist; int mpirank = 0, mpisize = 1; if (xm_tensor_get_allocator(a) != xm_tensor_get_allocator(c) || xm_tensor_get_allocator(b) != xm_tensor_get_allocator(c)) fatal("tensors must use same allocator"); if (xm_tensor_get_scalar_type(a) != xm_tensor_get_scalar_type(c) || xm_tensor_get_scalar_type(b) != xm_tensor_get_scalar_type(c)) fatal("tensors must have same scalar type"); #ifdef XM_USE_MPI MPI_Comm_rank(MPI_COMM_WORLD, &mpirank); MPI_Comm_size(MPI_COMM_WORLD, &mpisize); #endif bsa = xm_tensor_get_block_space(a); bsb = xm_tensor_get_block_space(b); bsc = xm_tensor_get_block_space(c); if (strlen(idxa) != xm_block_space_get_ndims(bsa)) fatal("bad contraction indices"); if (strlen(idxb) != xm_block_space_get_ndims(bsb)) fatal("bad contraction indices"); if (strlen(idxc) != xm_block_space_get_ndims(bsc)) fatal("bad contraction indices"); xm_make_masks(idxa, idxb, &cidxa, &cidxb); xm_make_masks(idxc, idxa, &cidxc, &aidxa); xm_make_masks(idxc, idxb, &aidxc, &aidxb); if (aidxa.n + cidxa.n != xm_block_space_get_ndims(bsa)) fatal("bad contraction indices"); if (aidxb.n + cidxb.n != xm_block_space_get_ndims(bsb)) 
fatal("bad contraction indices"); if (aidxc.n + cidxc.n != xm_block_space_get_ndims(bsc)) fatal("bad contraction indices"); if (!(aidxc.n > 0 && aidxc.i[0] == 0) && !(cidxc.n > 0 && cidxc.i[0] == 0)) fatal("bad contraction indices"); for (i = 0; i < cidxa.n; i++) if (!xm_block_space_eq1(bsa, cidxa.i[i], bsb, cidxb.i[i])) fatal("inconsistent a and b tensor block-spaces"); for (i = 0; i < cidxc.n; i++) if (!xm_block_space_eq1(bsc, cidxc.i[i], bsa, aidxa.i[i])) fatal("inconsistent a and c tensor block-spaces"); for (i = 0; i < aidxc.n; i++) if (!xm_block_space_eq1(bsc, aidxc.i[i], bsb, aidxb.i[i])) fatal("inconsistent b and c tensor block-spaces"); nblocksa = xm_tensor_get_nblocks(a); nblkk = xm_dim_dot_mask(&nblocksa, &cidxa); bufbytes = 2 * (xm_tensor_get_largest_block_bytes(a) + xm_tensor_get_largest_block_bytes(b) + xm_tensor_get_largest_block_bytes(c)); xm_tensor_get_canonical_block_list(c, &blklist, &nblklist); #ifdef _OPENMP #pragma omp parallel private(i) #endif { struct blockpair *pairs; void *buf; if ((pairs = malloc(nblkk * sizeof *pairs)) == NULL) fatal("out of memory"); if ((buf = malloc(bufbytes)) == NULL) fatal("out of memory"); #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = 0; i < nblklist; i++) { if ((int)i % mpisize == mpirank) compute_block(alpha, a, b, beta, c, cidxa, aidxa, cidxb, aidxb, cidxc, aidxc, blklist[i], pairs, buf); } free(buf); free(pairs); } free(blklist); #ifdef XM_USE_MPI MPI_Barrier(MPI_COMM_WORLD); #endif }
GB_memset.c
//------------------------------------------------------------------------------
// GB_memset: parallel memset
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Note that this function uses its own hard-coded chunk size.

#include "GB.h"
#define GB_MEM_CHUNK (1024*1024)

void GB_memset                  // parallel memset
(
    void *dest,                 // destination
    const int c,                // value to set
    size_t n,                   // # of bytes to set
    int nthreads                // max # of threads to use
)
{
    if (nthreads <= 1 || n <= GB_MEM_CHUNK)
    {
        //----------------------------------------------------------------------
        // memset using a single thread
        //----------------------------------------------------------------------
        memset (dest, c, n) ;
    }
    else
    {
        //----------------------------------------------------------------------
        // memset using multiple threads
        //----------------------------------------------------------------------
        size_t nchunks = 1 + (n / GB_MEM_CHUNK) ;
        if (((size_t) nthreads) > nchunks)
        {
            nthreads = (int) nchunks ;
        }
        GB_void *pdest = (GB_void *) dest ;
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (k = 0 ; k < nchunks ; k++)
        {
            size_t start = k * GB_MEM_CHUNK ;
            if (start < n)
            {
                size_t chunk = GB_IMIN (n - start, GB_MEM_CHUNK) ;
                memset (pdest + start, c, chunk) ;
            }
        }
    }
}
//------------------------------------------------------------------------------
//GB_memset: parallel memset
//------------------------------------------------------------------------------

//SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

//Note that this function uses its own hard-coded chunk size.

#include "GB.h"
#define GB_MEM_CHUNK (1024*1024)

void GB_memset                  // parallel memset
(
    void *dest,                 //destination
    const int c,                //value to set
    size_t n,                   // # of bytes to set
    int nthreads                // max # of threads to use
)
{
    if (nthreads <= 1 || n <= GB_MEM_CHUNK) {
        //----------------------------------------------------------------------
        //memset using a single thread
        //----------------------------------------------------------------------
        memset(dest, c, n);
    } else {
        //----------------------------------------------------------------------
        //memset using multiple threads
        //----------------------------------------------------------------------
        size_t nchunks = 1 + (n / GB_MEM_CHUNK);
        if (((size_t) nthreads) > nchunks) {
            nthreads = (int)nchunks;
        }
        GB_void *pdest = (GB_void *) dest;
        int64_t k;
        for (k = 0; k < nchunks; k++) {
            size_t start = k * GB_MEM_CHUNK;
            if (start < n) {
                size_t chunk = GB_IMIN(n - start, GB_MEM_CHUNK);
                memset(pdest + start, c, chunk);
            }
        }
    }
}
//------------------------------------------------------------------------------
//GB_memset: parallel memset
//------------------------------------------------------------------------------

//SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

//Note that this function uses its own hard-coded chunk size.

#include "GB.h"
#define GB_MEM_CHUNK (1024*1024)

void GB_memset                  // parallel memset
(
    void *dest,                 //destination
    const int c,                //value to set
    size_t n,                   // # of bytes to set
    int nthreads                // max # of threads to use
)
{
    if (nthreads <= 1 || n <= GB_MEM_CHUNK) {
        //----------------------------------------------------------------------
        //memset using a single thread
        //----------------------------------------------------------------------
        memset(dest, c, n);
    } else {
        //----------------------------------------------------------------------
        //memset using multiple threads
        //----------------------------------------------------------------------
        size_t nchunks = 1 + (n / GB_MEM_CHUNK);
        if (((size_t) nthreads) > nchunks) {
            nthreads = (int)nchunks;
        }
        GB_void *pdest = (GB_void *) dest;
        int64_t k;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (k = 0; k < nchunks; k++) {
            size_t start = k * GB_MEM_CHUNK;
            if (start < n) {
                size_t chunk = GB_IMIN(n - start, GB_MEM_CHUNK);
                memset(pdest + start, c, chunk);
            }
        }
    }
}
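GB_memset splits a large buffer into fixed 1 MiB chunks so the dynamic schedule can balance the threads even when n is not a multiple of the chunk size. A hedged, standalone usage sketch follows; GB_void and GB_IMIN are GraphBLAS-internal, so plain C equivalents are used here.

/* Standalone sketch of the chunked parallel-memset pattern above; the
   names memset_chunked, MEM_CHUNK, and IMIN are illustrative, not the
   GraphBLAS API. */
#include <stdlib.h>
#include <string.h>

#define MEM_CHUNK (1024*1024)
#define IMIN(a,b) ((a) < (b) ? (a) : (b))

static void memset_chunked(void *dest, int c, size_t n, int nthreads)
{
    if (nthreads <= 1 || n <= MEM_CHUNK) { memset(dest, c, n); return; }
    size_t nchunks = 1 + n / MEM_CHUNK;
    char *p = (char *) dest;
    long k;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (k = 0; k < (long) nchunks; k++) {
        size_t start = (size_t) k * MEM_CHUNK;
        if (start < n)                       /* last chunk may be short */
            memset(p + start, c, IMIN(n - start, MEM_CHUNK));
    }
}

int main(void)
{
    size_t n = 8u << 20;                     /* zero an 8 MiB buffer */
    char *buf = malloc(n);
    if (buf != NULL) { memset_chunked(buf, 0, n, 4); free(buf); }
    return 0;
}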
MeshRefiner.h
/** * @file * This file is part of SeisSol. * * @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger) * * @section LICENSE * Copyright (c) 2015, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @section DESCRIPTION */ #ifndef MESH_REFINER_H_ #define MESH_REFINER_H_ #include <cstring> #include "Geometry/MeshReader.h" #include "RefinerUtils.h" namespace seissol { namespace refinement { //------------------------------------------------------------------------------ template<typename T> class MeshRefiner { private: // m_cells contains the indices of the cells unsigned int* m_cells; T* m_vertices; size_t m_numSubCells; size_t m_numVertices; static const unsigned int kIndicesPerCell = 4; const unsigned int kSubCellsPerCell; public: MeshRefiner(const MeshReader& meshReader, const TetrahedronRefiner<T>& tetRefiner); MeshRefiner(const std::vector<const Element *>& subElements, const std::vector<const Vertex *>& subVertices, const std::map<int, int>& oldToNewVertexMap, const TetrahedronRefiner<T>& tetRefiner); ~MeshRefiner(); const unsigned int* getCellData() const; const T* getVertexData() const; std::size_t getkSubCellsPerCell() const; std::size_t getNumCells() const; std::size_t getNumVertices() const; }; //------------------------------------------------------------------------------ template<typename T> MeshRefiner<T>::MeshRefiner( const MeshReader& meshReader, const TetrahedronRefiner<T>& tetRefiner) : kSubCellsPerCell(tetRefiner.getDivisionCount()) { using std::size_t; const size_t kInVertexCount = meshReader.getVertices().size(); const size_t kInCellCount = meshReader.getElements().size(); m_numSubCells = kInCellCount * kSubCellsPerCell; const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell(); m_numVertices = kInVertexCount + kInCellCount * additionalVertices; m_cells = new unsigned int[m_numSubCells * kIndicesPerCell]; m_vertices = new T[m_numVertices * 3]; const std::vector<Vertex>& kVertices = meshReader.getVertices(); const std::vector<Element>& 
kElements = meshReader.getElements();

    // Copy original vertices
#ifdef _OPENMP
    #pragma omp parallel for
#endif // _OPENMP
    for (unsigned int i = 0; i < kInVertexCount; i++) {
        memcpy(&m_vertices[i*3], kVertices[i].coords, sizeof(double)*3);
    }

    // The pointer to the new vertices
    T* newVertices = &m_vertices[kInVertexCount*3];

    // Start the actual cell-refinement
#ifdef _OPENMP
    #pragma omp parallel
    {
#endif // _OPENMP
    Eigen::Matrix<T, 3, 1>* newVerticesTmp = new Eigen::Matrix<T, 3, 1>[additionalVertices];
    Tetrahedron<T>* newTetsTmp = new Tetrahedron<T>[kSubCellsPerCell];

#ifdef _OPENMP
    #pragma omp for schedule(static) nowait
#endif // _OPENMP
    for (size_t c = 0; c < kInCellCount; ++c) {
        // Build a Tetrahedron containing the coordinates of the vertices.
        Tetrahedron<T> inTet = Tetrahedron<T>(
                kVertices[kElements[c].vertices[0]].coords,
                kVertices[kElements[c].vertices[1]].coords,
                kVertices[kElements[c].vertices[2]].coords,
                kVertices[kElements[c].vertices[3]].coords,
                kElements[c].vertices[0],
                kElements[c].vertices[1],
                kElements[c].vertices[2],
                kElements[c].vertices[3]);

        // Generate the tets
        tetRefiner.refine(inTet, kInVertexCount + c*additionalVertices,
                newTetsTmp, newVerticesTmp);

        // Copy new vertices
        for (unsigned int i = 0; i < additionalVertices; i++) {
            memcpy(&newVertices[(c*additionalVertices + i) * 3],
                    newVerticesTmp[i].data(), sizeof(T)*3);
        }

        // Copy tets
        for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
            m_cells[(c*kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
            m_cells[(c*kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
        }
    }

    delete [] newVerticesTmp;
    delete [] newTetsTmp;

#ifdef _OPENMP
    }
#endif
}

template<typename T>
MeshRefiner<T>::MeshRefiner(
        const std::vector<const Element *>& subElements,
        const std::vector<const Vertex *>& subVertices,
        const std::map<int, int>& oldToNewVertexMap,
        const TetrahedronRefiner<T>& tetRefiner)
        : kSubCellsPerCell(tetRefiner.getDivisionCount())
{
    using std::size_t;

    const size_t kInVertexCount = subVertices.size();
    const size_t kInCellCount = subElements.size();
    m_numSubCells = kInCellCount * kSubCellsPerCell;

    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
    m_vertices = new T[m_numVertices * 3];

    const std::vector<const Vertex*>& kVertices = subVertices;
    const std::vector<const Element*>& kElements = subElements;

    // Copy original vertices
#ifdef _OPENMP
    #pragma omp parallel for
#endif // _OPENMP
    for (unsigned int i = 0; i < kInVertexCount; i++) {
        memcpy(&m_vertices[i*3], kVertices[i]->coords, sizeof(double)*3);
    }

    // The pointer to the new vertices
    T* newVertices = &m_vertices[kInVertexCount*3];

    // Start the actual cell-refinement
#ifdef _OPENMP
    #pragma omp parallel shared(oldToNewVertexMap)
    {
#endif // _OPENMP
    Eigen::Matrix<T, 3, 1>* newVerticesTmp = new Eigen::Matrix<T, 3, 1>[additionalVertices];
    Tetrahedron<T>* newTetsTmp = new Tetrahedron<T>[kSubCellsPerCell];

#ifdef _OPENMP
    #pragma omp for schedule(static) nowait
#endif // _OPENMP
    for (size_t c = 0; c < kInCellCount; ++c) {
        // Build a Tetrahedron containing the coordinates of the vertices.
Tetrahedron<T> inTet = Tetrahedron<T>( kVertices[oldToNewVertexMap.at(kElements[c]->vertices[0])]->coords, kVertices[oldToNewVertexMap.at(kElements[c]->vertices[1])]->coords, kVertices[oldToNewVertexMap.at(kElements[c]->vertices[2])]->coords, kVertices[oldToNewVertexMap.at(kElements[c]->vertices[3])]->coords, oldToNewVertexMap.at(kElements[c]->vertices[0]), oldToNewVertexMap.at(kElements[c]->vertices[1]), oldToNewVertexMap.at(kElements[c]->vertices[2]), oldToNewVertexMap.at(kElements[c]->vertices[3])); // Generate the tets tetRefiner.refine(inTet, kInVertexCount + c*additionalVertices, newTetsTmp, newVerticesTmp); // Copy new vertices for (unsigned int i = 0; i < additionalVertices; i++) { memcpy(&newVertices[(c*additionalVertices + i) * 3], newVerticesTmp[i].data(), sizeof(T)*3); } // Copy tets for (unsigned int i = 0; i < kSubCellsPerCell; i++) { m_cells[(c*kSubCellsPerCell + i) * 4] = newTetsTmp[i].i; m_cells[(c*kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j; m_cells[(c*kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k; m_cells[(c*kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l; } } delete [] newVerticesTmp; delete [] newTetsTmp; #ifdef _OPENMP } #endif } template<typename T> MeshRefiner<T>::~MeshRefiner() { delete [] m_cells; delete [] m_vertices; } //------------------------------------------------------------------------------ template<typename T> const unsigned int* MeshRefiner<T>::getCellData() const { return &m_cells[0]; } template<typename T> std::size_t MeshRefiner<T>::getkSubCellsPerCell() const { return kSubCellsPerCell; } //------------------------------------------------------------------------------ template<typename T> const T* MeshRefiner<T>::getVertexData() const { return &m_vertices[0]; } //------------------------------------------------------------------------------ template<typename T> std::size_t MeshRefiner<T>::getNumCells() const { return m_numSubCells; } //------------------------------------------------------------------------------ template<typename T> std::size_t MeshRefiner<T>::getNumVertices() const { return m_numVertices; } //------------------------------------------------------------------------------ } // namespace } #endif // MESH_REFINER_H_
#ifndef MESH_REFINER_H_
#define MESH_REFINER_H_

#include <cstring>

#include "Geometry/MeshReader.h"
#include "RefinerUtils.h"

namespace seissol
{
    namespace refinement
    {

//------------------------------------------------------------------------------

	template < typename T > class MeshRefiner {
	  private:
	    //m_cells contains the indices of the cells
	    unsigned int *m_cells;
	    T *m_vertices;

	    size_t m_numSubCells;
	    size_t m_numVertices;

	    static const unsigned int kIndicesPerCell = 4;

	    const unsigned int kSubCellsPerCell;

	  public:
	    MeshRefiner(const MeshReader & meshReader,
			const TetrahedronRefiner < T > &tetRefiner);

	    MeshRefiner(const std::vector < const Element * >&subElements,
			const std::vector < const Vertex * >&subVertices,
			const std::map < int, int >&oldToNewVertexMap,
			const TetrahedronRefiner < T > &tetRefiner);

	    ~MeshRefiner();

	    const unsigned int *getCellData() const;
	    const T *getVertexData() const;
	    std::size_t getkSubCellsPerCell() const;

	    std::size_t getNumCells() const;
	    std::size_t getNumVertices() const;
	};

//------------------------------------------------------------------------------

	template < typename T > MeshRefiner < T >::MeshRefiner(
	    const MeshReader & meshReader,
	    const TetrahedronRefiner < T > &tetRefiner)
	:   kSubCellsPerCell(tetRefiner.getDivisionCount())
	{
	    using std::size_t;

	    const size_t kInVertexCount = meshReader.getVertices().size();
	    const size_t kInCellCount = meshReader.getElements().size();
	    m_numSubCells = kInCellCount * kSubCellsPerCell;

	    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
	    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

	    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
	    m_vertices = new T[m_numVertices * 3];

	    const std::vector < Vertex > &kVertices = meshReader.getVertices();
	    const std::vector < Element > &kElements = meshReader.getElements();

	    //Copy original vertices
	    for (unsigned int i = 0; i < kInVertexCount; i++) {
		memcpy(&m_vertices[i * 3], kVertices[i].coords, sizeof(double) * 3);
	    }

	    //The pointer to the new vertices
	    T *newVertices = &m_vertices[kInVertexCount * 3];

	    //Start the actual cell-refinement
	    Eigen::Matrix < T, 3, 1 > *newVerticesTmp =
		new Eigen::Matrix < T, 3, 1 >[additionalVertices];
	    Tetrahedron < T > *newTetsTmp = new Tetrahedron < T >[kSubCellsPerCell];

	    for (size_t c = 0; c < kInCellCount; ++c) {
		//Build a Tetrahedron containing the coordinates of the vertices.
		Tetrahedron < T > inTet = Tetrahedron < T > (
		    kVertices[kElements[c].vertices[0]].coords,
		    kVertices[kElements[c].vertices[1]].coords,
		    kVertices[kElements[c].vertices[2]].coords,
		    kVertices[kElements[c].vertices[3]].coords,
		    kElements[c].vertices[0],
		    kElements[c].vertices[1],
		    kElements[c].vertices[2],
		    kElements[c].vertices[3]);

		//Generate the tets
		tetRefiner.refine(inTet, kInVertexCount + c * additionalVertices,
				  newTetsTmp, newVerticesTmp);

		//Copy new vertices
		for (unsigned int i = 0; i < additionalVertices; i++) {
		    memcpy(&newVertices[(c * additionalVertices + i) * 3],
			   newVerticesTmp[i].data(), sizeof(T) * 3);
		}

		//Copy tets
		for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
		    m_cells[(c * kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
		    m_cells[(c * kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
		    m_cells[(c * kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
		    m_cells[(c * kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
		}
	    }

	    delete[] newVerticesTmp;
	    delete[] newTetsTmp;
	}

	template < typename T > MeshRefiner < T >::MeshRefiner(
	    const std::vector < const Element * >&subElements,
	    const std::vector < const Vertex * >&subVertices,
	    const std::map < int, int >&oldToNewVertexMap,
	    const TetrahedronRefiner < T > &tetRefiner)
	:   kSubCellsPerCell(tetRefiner.getDivisionCount())
	{
	    using std::size_t;

	    const size_t kInVertexCount = subVertices.size();
	    const size_t kInCellCount = subElements.size();
	    m_numSubCells = kInCellCount * kSubCellsPerCell;

	    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
	    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

	    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
	    m_vertices = new T[m_numVertices * 3];

	    const std::vector < const Vertex *>&kVertices = subVertices;
	    const std::vector < const Element *>&kElements = subElements;

	    //Copy original vertices
	    for (unsigned int i = 0; i < kInVertexCount; i++) {
		memcpy(&m_vertices[i * 3], kVertices[i]->coords, sizeof(double) * 3);
	    }

	    //The pointer to the new vertices
	    T *newVertices = &m_vertices[kInVertexCount * 3];

	    //Start the actual cell-refinement
	    Eigen::Matrix < T, 3, 1 > *newVerticesTmp =
		new Eigen::Matrix < T, 3, 1 >[additionalVertices];
	    Tetrahedron < T > *newTetsTmp = new Tetrahedron < T >[kSubCellsPerCell];

	    for (size_t c = 0; c < kInCellCount; ++c) {
		//Build a Tetrahedron containing the coordinates of the vertices.
		Tetrahedron < T > inTet = Tetrahedron < T > (
		    kVertices[oldToNewVertexMap.at(kElements[c]->vertices[0])]->coords,
		    kVertices[oldToNewVertexMap.at(kElements[c]->vertices[1])]->coords,
		    kVertices[oldToNewVertexMap.at(kElements[c]->vertices[2])]->coords,
		    kVertices[oldToNewVertexMap.at(kElements[c]->vertices[3])]->coords,
		    oldToNewVertexMap.at(kElements[c]->vertices[0]),
		    oldToNewVertexMap.at(kElements[c]->vertices[1]),
		    oldToNewVertexMap.at(kElements[c]->vertices[2]),
		    oldToNewVertexMap.at(kElements[c]->vertices[3]));

		//Generate the tets
		tetRefiner.refine(inTet, kInVertexCount + c * additionalVertices,
				  newTetsTmp, newVerticesTmp);

		//Copy new vertices
		for (unsigned int i = 0; i < additionalVertices; i++) {
		    memcpy(&newVertices[(c * additionalVertices + i) * 3],
			   newVerticesTmp[i].data(), sizeof(T) * 3);
		}

		//Copy tets
		for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
		    m_cells[(c * kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
		    m_cells[(c * kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
		    m_cells[(c * kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
		    m_cells[(c * kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
		}
	    }

	    delete[] newVerticesTmp;
	    delete[] newTetsTmp;
	}

	template < typename T > MeshRefiner < T >::~MeshRefiner()
	{
	    delete[] m_cells;
	    delete[] m_vertices;
	}

//------------------------------------------------------------------------------

	template < typename T >
	const unsigned int *MeshRefiner < T >::getCellData() const
	{
	    return &m_cells[0];
	}

	template < typename T >
	std::size_t MeshRefiner < T >::getkSubCellsPerCell() const
	{
	    return kSubCellsPerCell;
	}

//------------------------------------------------------------------------------

	template < typename T >
	const T *MeshRefiner < T >::getVertexData() const
	{
	    return &m_vertices[0];
	}

//------------------------------------------------------------------------------

	template < typename T >
	std::size_t MeshRefiner < T >::getNumCells() const
	{
	    return m_numSubCells;
	}

//------------------------------------------------------------------------------

	template < typename T >
	std::size_t MeshRefiner < T >::getNumVertices() const
	{
	    return m_numVertices;
	}

//------------------------------------------------------------------------------

    }				//namespace
}

#endif /* MESH_REFINER_H_ */
#ifndef MESH_REFINER_H_
#define MESH_REFINER_H_

#include <cstring>

#include "Geometry/MeshReader.h"
#include "RefinerUtils.h"

namespace seissol
{
    namespace refinement
    {

//------------------------------------------------------------------------------

	template < typename T > class MeshRefiner {
	  private:
	    //m_cells contains the indices of the cells
	    unsigned int *m_cells;
	    T *m_vertices;

	    size_t m_numSubCells;
	    size_t m_numVertices;

	    static const unsigned int kIndicesPerCell = 4;

	    const unsigned int kSubCellsPerCell;

	  public:
	    MeshRefiner(const MeshReader & meshReader,
			const TetrahedronRefiner < T > &tetRefiner);

	    MeshRefiner(const std::vector < const Element * >&subElements,
			const std::vector < const Vertex * >&subVertices,
			const std::map < int, int >&oldToNewVertexMap,
			const TetrahedronRefiner < T > &tetRefiner);

	    ~MeshRefiner();

	    const unsigned int *getCellData() const;
	    const T *getVertexData() const;
	    std::size_t getkSubCellsPerCell() const;

	    std::size_t getNumCells() const;
	    std::size_t getNumVertices() const;
	};

//------------------------------------------------------------------------------

	template < typename T > MeshRefiner < T >::MeshRefiner(
	    const MeshReader & meshReader,
	    const TetrahedronRefiner < T > &tetRefiner)
	:   kSubCellsPerCell(tetRefiner.getDivisionCount())
	{
	    using std::size_t;

	    const size_t kInVertexCount = meshReader.getVertices().size();
	    const size_t kInCellCount = meshReader.getElements().size();
	    m_numSubCells = kInCellCount * kSubCellsPerCell;

	    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
	    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

	    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
	    m_vertices = new T[m_numVertices * 3];

	    const std::vector < Vertex > &kVertices = meshReader.getVertices();
	    const std::vector < Element > &kElements = meshReader.getElements();

	    //Copy original vertices
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
	    for (unsigned int i = 0; i < kInVertexCount; i++) {
		memcpy(&m_vertices[i * 3], kVertices[i].coords, sizeof(double) * 3);
	    }

	    //The pointer to the new vertices
	    T *newVertices = &m_vertices[kInVertexCount * 3];

	    //Start the actual cell-refinement
#ifdef _OPENMP
#pragma omp parallel
	    {
#endif /* _OPENMP */
		Eigen::Matrix < T, 3, 1 > *newVerticesTmp =
		    new Eigen::Matrix < T, 3, 1 >[additionalVertices];
		Tetrahedron < T > *newTetsTmp = new Tetrahedron < T >[kSubCellsPerCell];

#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif /* _OPENMP */
		for (size_t c = 0; c < kInCellCount; ++c) {
		    //Build a Tetrahedron containing the coordinates of the vertices.
		    Tetrahedron < T > inTet = Tetrahedron < T > (
			kVertices[kElements[c].vertices[0]].coords,
			kVertices[kElements[c].vertices[1]].coords,
			kVertices[kElements[c].vertices[2]].coords,
			kVertices[kElements[c].vertices[3]].coords,
			kElements[c].vertices[0],
			kElements[c].vertices[1],
			kElements[c].vertices[2],
			kElements[c].vertices[3]);

		    //Generate the tets
		    tetRefiner.refine(inTet, kInVertexCount + c * additionalVertices,
				      newTetsTmp, newVerticesTmp);

		    //Copy new vertices
		    for (unsigned int i = 0; i < additionalVertices; i++) {
			memcpy(&newVertices[(c * additionalVertices + i) * 3],
			       newVerticesTmp[i].data(), sizeof(T) * 3);
		    }

		    //Copy tets
		    for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
			m_cells[(c * kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
			m_cells[(c * kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
			m_cells[(c * kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
			m_cells[(c * kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
		    }
		}

		delete[] newVerticesTmp;
		delete[] newTetsTmp;
#ifdef _OPENMP
	    }
#endif
	}

	template < typename T > MeshRefiner < T >::MeshRefiner(
	    const std::vector < const Element * >&subElements,
	    const std::vector < const Vertex * >&subVertices,
	    const std::map < int, int >&oldToNewVertexMap,
	    const TetrahedronRefiner < T > &tetRefiner)
	:   kSubCellsPerCell(tetRefiner.getDivisionCount())
	{
	    using std::size_t;

	    const size_t kInVertexCount = subVertices.size();
	    const size_t kInCellCount = subElements.size();
	    m_numSubCells = kInCellCount * kSubCellsPerCell;

	    const unsigned int additionalVertices = tetRefiner.additionalVerticesPerCell();
	    m_numVertices = kInVertexCount + kInCellCount * additionalVertices;

	    m_cells = new unsigned int[m_numSubCells * kIndicesPerCell];
	    m_vertices = new T[m_numVertices * 3];

	    const std::vector < const Vertex *>&kVertices = subVertices;
	    const std::vector < const Element *>&kElements = subElements;

	    //Copy original vertices
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
	    for (unsigned int i = 0; i < kInVertexCount; i++) {
		memcpy(&m_vertices[i * 3], kVertices[i]->coords, sizeof(double) * 3);
	    }

	    //The pointer to the new vertices
	    T *newVertices = &m_vertices[kInVertexCount * 3];

	    //Start the actual cell-refinement
#ifdef _OPENMP
#pragma omp parallel shared(oldToNewVertexMap)
	    {
#endif /* _OPENMP */
		Eigen::Matrix < T, 3, 1 > *newVerticesTmp =
		    new Eigen::Matrix < T, 3, 1 >[additionalVertices];
		Tetrahedron < T > *newTetsTmp = new Tetrahedron < T >[kSubCellsPerCell];

#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif /* _OPENMP */
		for (size_t c = 0; c < kInCellCount; ++c) {
		    //Build a Tetrahedron containing the coordinates of the vertices.
		    Tetrahedron < T > inTet = Tetrahedron < T > (
			kVertices[oldToNewVertexMap.at(kElements[c]->vertices[0])]->coords,
			kVertices[oldToNewVertexMap.at(kElements[c]->vertices[1])]->coords,
			kVertices[oldToNewVertexMap.at(kElements[c]->vertices[2])]->coords,
			kVertices[oldToNewVertexMap.at(kElements[c]->vertices[3])]->coords,
			oldToNewVertexMap.at(kElements[c]->vertices[0]),
			oldToNewVertexMap.at(kElements[c]->vertices[1]),
			oldToNewVertexMap.at(kElements[c]->vertices[2]),
			oldToNewVertexMap.at(kElements[c]->vertices[3]));

		    //Generate the tets
		    tetRefiner.refine(inTet, kInVertexCount + c * additionalVertices,
				      newTetsTmp, newVerticesTmp);

		    //Copy new vertices
		    for (unsigned int i = 0; i < additionalVertices; i++) {
			memcpy(&newVertices[(c * additionalVertices + i) * 3],
			       newVerticesTmp[i].data(), sizeof(T) * 3);
		    }

		    //Copy tets
		    for (unsigned int i = 0; i < kSubCellsPerCell; i++) {
			m_cells[(c * kSubCellsPerCell + i) * 4] = newTetsTmp[i].i;
			m_cells[(c * kSubCellsPerCell + i) * 4 + 1] = newTetsTmp[i].j;
			m_cells[(c * kSubCellsPerCell + i) * 4 + 2] = newTetsTmp[i].k;
			m_cells[(c * kSubCellsPerCell + i) * 4 + 3] = newTetsTmp[i].l;
		    }
		}

		delete[] newVerticesTmp;
		delete[] newTetsTmp;
#ifdef _OPENMP
	    }
#endif
	}

	template < typename T > MeshRefiner < T >::~MeshRefiner()
	{
	    delete[] m_cells;
	    delete[] m_vertices;
	}

//------------------------------------------------------------------------------

	template < typename T >
	const unsigned int *MeshRefiner < T >::getCellData() const
	{
	    return &m_cells[0];
	}

	template < typename T >
	std::size_t MeshRefiner < T >::getkSubCellsPerCell() const
	{
	    return kSubCellsPerCell;
	}

//------------------------------------------------------------------------------

	template < typename T >
	const T *MeshRefiner < T >::getVertexData() const
	{
	    return &m_vertices[0];
	}

//------------------------------------------------------------------------------

	template < typename T >
	std::size_t MeshRefiner < T >::getNumCells() const
	{
	    return m_numSubCells;
	}

//------------------------------------------------------------------------------

	template < typename T >
	std::size_t MeshRefiner < T >::getNumVertices() const
	{
	    return m_numVertices;
	}

//------------------------------------------------------------------------------

    }				//namespace
}

#endif /* MESH_REFINER_H_ */
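MeshRefiner's parallel region relies on allocating scratch arrays inside `#pragma omp parallel` so each thread owns its own temporaries, then splitting iterations with an orphaned `omp for` and skipping the loop-end barrier via `nowait`. A minimal C sketch of that pattern follows; work() and ncells are hypothetical stand-ins for the refinement step, not SeisSol API.

/* Minimal sketch of the per-thread scratch-buffer pattern above.
   work() is a hypothetical placeholder; only the OpenMP structure
   matters here. */
#include <stddef.h>

extern void work(size_t c, double *scratch);   /* hypothetical */

static void refine_all(size_t ncells)
{
#ifdef _OPENMP
#pragma omp parallel
#endif
    {
        /* declared inside the parallel region: one copy per thread */
        double scratch[64];
        long c;
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
        for (c = 0; c < (long) ncells; ++c)
            work((size_t) c, scratch);
        /* nowait: a finished thread proceeds past the loop without
           waiting; the implicit barrier at the end of the parallel
           region still synchronizes everyone before returning */
    }
}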
GB_unaryop__lnot_fp32_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_int64 // op(A') function: GB_tran__lnot_fp32_int64 // C type: float // A type: int64_t // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_int64 ( float *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_int64 // op(A') function: GB_tran__lnot_fp32_int64 // C type: float // A type: int64_t // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_int64 ( float *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_int64 // op(A') function: GB_tran__lnot_fp32_int64 // C type: float // A type: int64_t // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ int64_t #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ float z = (float) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_int64 ( float *Cx, // Cx and Ax may be aliased int64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
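With the macros expanded, the generated op(A) kernel above reduces to a flat, statically scheduled loop over the entries. A hand-expanded equivalent, standalone and without the GraphBLAS plumbing (the function name here is illustrative, not the library's):

/* Hand-expanded sketch of GB_unop__lnot_fp32_int64: cast each int64
   entry to float (GB_CASTING), then apply logical-not (GB_OP). */
#include <stdint.h>

void lnot_fp32_int64(float *Cx, const int64_t *Ax, int64_t anz, int nthreads)
{
    int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        float z = (float) Ax[p];    /* GB_CASTING: float z = (float) aij */
        Cx[p] = !(z != 0);          /* GB_OP: cij = !(aij != 0)          */
    }
}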
GB_concat_sparse_template.c
//------------------------------------------------------------------------------ // GB_concat_sparse_template: concatenate a tile into a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The tile A is hypersparse, sparse, or full, not bitmap. { //-------------------------------------------------------------------------- // get C and the tile A //-------------------------------------------------------------------------- const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ; GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; //-------------------------------------------------------------------------- // copy the tile A into C //-------------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(A_nthreads) schedule(static) for (tid = 0 ; tid < A_ntasks ; tid++) { int64_t kfirst = kfirst_Aslice [tid] ; int64_t klast = klast_Aslice [tid] ; for (int64_t k = kfirst ; k <= klast ; k++) { int64_t j = GBH (Ah, k) ; const int64_t pC_start = W [j] ; //------------------------------------------------------------------ // find the part of the kth vector A(:,j) for this task //------------------------------------------------------------------ int64_t pA_start, pA_end ; // as done by GB_get_pA, but also get p0 = Ap [k] const int64_t p0 = GBP (Ap, k, avlen) ; const int64_t p1 = GBP (Ap, k+1, avlen) ; if (k == kfirst) { // First vector for task tid; may only be partially owned. pA_start = pstart_Aslice [tid] ; pA_end = GB_IMIN (p1, pstart_Aslice [tid+1]) ; } else if (k == klast) { // Last vector for task tid; may only be partially owned. pA_start = p0 ; pA_end = pstart_Aslice [tid+1] ; } else { // task tid entirely owns this vector A(:,k). pA_start = p0 ; pA_end = p1 ; } //------------------------------------------------------------------ // append A(:,j) onto C(:,j) //------------------------------------------------------------------ GB_PRAGMA_SIMD for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t i = GBI (Ai, pA, avlen) ; // i = Ai [pA] int64_t pC = pC_start + pA - p0 ; Ci [pC] = cistart + i ; // Cx [pC] = Ax [pA] ; GB_COPY (pC, pA) ; } } } done = true ; } #undef GB_CTYPE
//------------------------------------------------------------------------------
//GB_concat_sparse_template: concatenate a tile into a sparse matrix
//------------------------------------------------------------------------------

//SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

//The tile A is hypersparse, sparse, or full, not bitmap.

{

    //--------------------------------------------------------------------------
    //get C and the tile A
    //--------------------------------------------------------------------------

    const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x;

    //--------------------------------------------------------------------------
    //copy the tile A into C
    //--------------------------------------------------------------------------

    int tid;

    for (tid = 0; tid < A_ntasks; tid++) {
        int64_t kfirst = kfirst_Aslice[tid];
        int64_t klast = klast_Aslice[tid];
        for (int64_t k = kfirst; k <= klast; k++) {
            int64_t j = GBH(Ah, k);
            const int64_t pC_start = W[j];

            //------------------------------------------------------------------
            //find the part of the kth vector A(:,j) for this task
            //------------------------------------------------------------------

            int64_t pA_start, pA_end;
            //as done by GB_get_pA, but also get p0 = Ap[k]
            const int64_t p0 = GBP(Ap, k, avlen);
            const int64_t p1 = GBP(Ap, k + 1, avlen);
            if (k == kfirst) {
                //First vector for task tid; may only be partially owned.
                pA_start = pstart_Aslice[tid];
                pA_end = GB_IMIN(p1, pstart_Aslice[tid + 1]);
            } else if (k == klast) {
                //Last vector for task tid; may only be partially owned.
                pA_start = p0;
                pA_end = pstart_Aslice[tid + 1];
            } else {
                //task tid entirely owns this vector A(:,k).
                pA_start = p0;
                pA_end = p1;
            }

            //------------------------------------------------------------------
            //append A(:,j) onto C(:,j)
            //------------------------------------------------------------------

            GB_PRAGMA_SIMD
            for (int64_t pA = pA_start; pA < pA_end; pA++) {
                int64_t i = GBI(Ai, pA, avlen);    //i = Ai[pA]
                int64_t pC = pC_start + pA - p0;
                Ci[pC] = cistart + i;
                //Cx[pC] = Ax[pA];
                GB_COPY(pC, pA);
            }
        }
    }

    done = true;
}

#undef GB_CTYPE
//------------------------------------------------------------------------------
//GB_concat_sparse_template: concatenate a tile into a sparse matrix
//------------------------------------------------------------------------------

//SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

//The tile A is hypersparse, sparse, or full, not bitmap.

{

    //--------------------------------------------------------------------------
    //get C and the tile A
    //--------------------------------------------------------------------------

    const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x;

    //--------------------------------------------------------------------------
    //copy the tile A into C
    //--------------------------------------------------------------------------

    int tid;

#pragma omp parallel for num_threads(A_nthreads) schedule(static)
    for (tid = 0; tid < A_ntasks; tid++) {
        int64_t kfirst = kfirst_Aslice[tid];
        int64_t klast = klast_Aslice[tid];
        for (int64_t k = kfirst; k <= klast; k++) {
            int64_t j = GBH(Ah, k);
            const int64_t pC_start = W[j];

            //------------------------------------------------------------------
            //find the part of the kth vector A(:,j) for this task
            //------------------------------------------------------------------

            int64_t pA_start, pA_end;
            //as done by GB_get_pA, but also get p0 = Ap[k]
            const int64_t p0 = GBP(Ap, k, avlen);
            const int64_t p1 = GBP(Ap, k + 1, avlen);
            if (k == kfirst) {
                //First vector for task tid; may only be partially owned.
                pA_start = pstart_Aslice[tid];
                pA_end = GB_IMIN(p1, pstart_Aslice[tid + 1]);
            } else if (k == klast) {
                //Last vector for task tid; may only be partially owned.
                pA_start = p0;
                pA_end = pstart_Aslice[tid + 1];
            } else {
                //task tid entirely owns this vector A(:,k).
                pA_start = p0;
                pA_end = p1;
            }

            //------------------------------------------------------------------
            //append A(:,j) onto C(:,j)
            //------------------------------------------------------------------

            GB_PRAGMA_SIMD
            for (int64_t pA = pA_start; pA < pA_end; pA++) {
                int64_t i = GBI(Ai, pA, avlen);    //i = Ai[pA]
                int64_t pC = pC_start + pA - p0;
                Ci[pC] = cistart + i;
                //Cx[pC] = Ax[pA];
                GB_COPY(pC, pA);
            }
        }
    }

    done = true;
}

#undef GB_CTYPE
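The inner loop above is an index-shifted copy: each entry of the tile keeps its offset within its vector (pA - p0), and only its row index is displaced by cistart. A simplified scalar version for a sparse (non-hypersparse) tile of doubles, with the GBI and GB_COPY macros replaced by direct array accesses (all names here are illustrative placeholders):

/* Simplified stand-in for the inner copy: append one segment of a tile's
   vector onto C, shifting row indices by cistart. */
#include <stdint.h>

static void append_segment(int64_t *Ci, double *Cx,              /* C out  */
                           const int64_t *Ai, const double *Ax,  /* tile A */
                           int64_t pA_start, int64_t pA_end, int64_t p0,
                           int64_t pC_start, int64_t cistart)
{
    for (int64_t pA = pA_start; pA < pA_end; pA++) {
        int64_t pC = pC_start + (pA - p0);   /* same offset within vector */
        Ci[pC] = cistart + Ai[pA];           /* row index shifted into C  */
        Cx[pC] = Ax[pA];                     /* value copied unchanged    */
    }
}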
fatorial.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

long fat(int n)
{
	long res;
	int i;

	res = 1;
	#pragma omp parallel for reduction(*:res)
	for(i = 2; i <= n; i++){
		res *= i;
	}

	return res;
}

int main(int argc, char **argv)
{
	int n;
	long resultado;

	if(argc < 2){
		printf("usage: ./fatorial <natural number>\n");
		exit(1);
	}

	n = atoi(argv[1]);
	if(n < 0){
		printf("Error! The input number is not a natural number\n");
		exit(1);
	}

	// omp_set_num_threads(1);
	printf("Computing the factorial of %d with %d threads.\n", n, omp_get_max_threads());
	resultado = fat(n);
	printf("fatorial(%d) = %ld\n", n, resultado);

	return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

long fat(int n)
{
    long res;
    int i;

    res = 1;
    for (i = 2; i <= n; i++) {
	res *= i;
    }

    return res;
}

int main(int argc, char **argv)
{
    int n;
    long resultado;

    if (argc < 2) {
	printf("usage: ./fatorial <natural number>\n");
	exit(1);
    }

    n = atoi(argv[1]);
    if (n < 0) {
	printf("Error! The input number is not a natural number\n");
	exit(1);
    }

    //omp_set_num_threads(1);
    printf("Computing the factorial of %d with %d threads.\n", n, omp_get_max_threads());
    resultado = fat(n);
    printf("fatorial(%d) = %ld\n", n, resultado);

    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

long fat(int n)
{
    long res;
    int i;

    res = 1;
#pragma omp parallel for reduction(*:res)
    for (i = 2; i <= n; i++) {
	res *= i;
    }

    return res;
}

int main(int argc, char **argv)
{
    int n;
    long resultado;

    if (argc < 2) {
	printf("usage: ./fatorial <natural number>\n");
	exit(1);
    }

    n = atoi(argv[1]);
    if (n < 0) {
	printf("Error! The input number is not a natural number\n");
	exit(1);
    }

    //omp_set_num_threads(1);
    printf("Computing the factorial of %d with %d threads.\n", n, omp_get_max_threads());
    resultado = fat(n);
    printf("fatorial(%d) = %ld\n", n, resultado);

    return 0;
}
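The reduction(*:res) clause in fat() gives every thread a private copy of res initialized to 1, the identity element of multiplication; the partial products are multiplied together when the loop ends, so the result matches the sequential version. Note that a 64-bit long overflows past 20!, so a hedged driver that stays within range (product() is an illustrative name):

/* Sketch of the same multiplicative reduction, kept within the range of
   a 64-bit long (20! = 2432902008176640000 still fits). */
#include <stdio.h>

static long product(int lo, int hi)
{
    long res = 1;    /* 1 is the identity of the * reduction */
    int i;
#pragma omp parallel for reduction(*:res)
    for (i = lo; i <= hi; i++)
        res *= i;    /* each thread multiplies its share into its copy */
    return res;
}

int main(void)
{
    printf("20! = %ld\n", product(2, 20));
    return 0;
}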